feat(OVA/VMDK import): transmit VMDK tables in multipart POST request (#5372)
See xoa-support#3060. The VMDK block tables could grow larger than the maximum allowed size of a websocket message, so they are now sent as parts of a multipart POST request, in the same transaction as the file itself.
parent e0987059d3
commit fdf52a3d59
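A minimal sketch of the table round trip implemented below (encodeTable and decodeTable are hypothetical names, not part of this commit): the client packs each grain table into uint32 little-endian bytes and appends it to the multipart body as a Blob before the file part, and the server decodes each non-file part back into a Uint32Array with a DataView.

// Client side: pack an array of numbers into uint32 LE bytes.
// In the browser, the resulting ArrayBuffer would be wrapped in a
// Blob and appended to a FormData ahead of the file part.
function encodeTable(numbers) {
  const view = new DataView(new ArrayBuffer(4 * numbers.length))
  numbers.forEach((n, i) => view.setUint32(i * 4, n, true)) // true = little-endian
  return view.buffer
}

// Server side: decode a received part's bytes back into a Uint32Array.
function decodeTable(arrayBuffer) {
  const view = new DataView(arrayBuffer)
  const result = new Uint32Array(view.byteLength / 4)
  for (let i = 0; i < result.length; i++) {
    result[i] = view.getUint32(i * 4, true)
  }
  return result
}

// Round trip: decodeTable(encodeTable([0, 1, 4, 9])) -> Uint32Array [0, 1, 4, 9]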
@@ -19,6 +19,7 @@

 - [Remotes/NFS] Only mount with `vers=3` when no other options [#4940](https://github.com/vatesfr/xen-orchestra/issues/4940) (PR [#5354](https://github.com/vatesfr/xen-orchestra/pull/5354))
 - [VM/network] Don't change VIF's locking mode automatically (PR [#5357](https://github.com/vatesfr/xen-orchestra/pull/5357))
+- [Import OVA] Fix 'Max payload size exceeded' error when importing huge OVAs (PR [#5372](https://github.com/vatesfr/xen-orchestra/pull/5372))

 ### Packages to release

@@ -40,5 +41,7 @@
 - xo-server-auth-ldap patch
 - @vates/multi-key-map minor
 - @xen-orchestra/fs patch
+- vhd-lib major
+- xo-vmdk-to-vhd major
 - xo-server minor
 - xo-web minor
@@ -22,22 +22,25 @@ const VHD_BLOCK_SIZE_SECTORS = VHD_BLOCK_SIZE_BYTES / SECTOR_SIZE
  * then allocates the blocks in a forwards pass.
  * @returns currentVhdPositionSector the first free sector after the data
  */
-function createBAT(
+function createBAT({
   firstBlockPosition,
   fragmentLogicAddressList,
-  ratio,
+  fragmentSize,
   bat,
-  bitmapSize
-) {
+  bitmapSize,
+}) {
   let currentVhdPositionSector = firstBlockPosition / SECTOR_SIZE
   const lastFragmentPerBlock = new Map()
   forEachRight(fragmentLogicAddressList, fragmentLogicAddress => {
-    assert.strictEqual(fragmentLogicAddress % SECTOR_SIZE, 0)
+    assert.strictEqual((fragmentLogicAddress * fragmentSize) % SECTOR_SIZE, 0)
     const vhdTableIndex = Math.floor(
-      fragmentLogicAddress / VHD_BLOCK_SIZE_BYTES
+      (fragmentLogicAddress * fragmentSize) / VHD_BLOCK_SIZE_BYTES
     )
     if (!lastFragmentPerBlock.has(vhdTableIndex)) {
-      lastFragmentPerBlock.set(vhdTableIndex, fragmentLogicAddress)
+      lastFragmentPerBlock.set(
+        vhdTableIndex,
+        fragmentLogicAddress * fragmentSize
+      )
     }
   })
   const lastFragmentPerBlockArray = [...lastFragmentPerBlock]
@@ -62,7 +65,7 @@ function createBAT(
  * "fragment" designate a chunk of incoming data (ie probably a VMDK grain), and "block" is a VHD block.
  * @param diskSize
  * @param fragmentSize
- * @param fragmentLogicalAddressList
+ * @param fragmentLogicAddressList an iterable returning LBAs in multiple of fragmentSize
  * @param fragmentIterator
  * @returns {Promise<Function>}
  */
@@ -70,7 +73,7 @@ function createBAT(
 export default async function createReadableStream(
   diskSize,
   fragmentSize,
-  fragmentLogicalAddressList,
+  fragmentLogicAddressList,
   fragmentIterator
 ) {
   const ratio = VHD_BLOCK_SIZE_BYTES / fragmentSize
@@ -108,19 +111,23 @@ export default async function createReadableStream(
   const bitmapSize =
     Math.ceil(VHD_BLOCK_SIZE_SECTORS / 8 / SECTOR_SIZE) * SECTOR_SIZE
   const bat = Buffer.alloc(tablePhysicalSizeBytes, 0xff)
-  const [endOfData, lastFragmentPerBlock] = createBAT(
+  const [endOfData, lastFragmentPerBlock] = createBAT({
     firstBlockPosition,
-    fragmentLogicalAddressList,
-    ratio,
+    fragmentLogicAddressList,
+    fragmentSize,
     bat,
-    bitmapSize
-  )
+    bitmapSize,
+  })
   const fileSize = endOfData * SECTOR_SIZE + FOOTER_SIZE
   let position = 0

   function* yieldAndTrack(buffer, expectedPosition, reason) {
     if (expectedPosition !== undefined) {
-      assert.strictEqual(position, expectedPosition, reason)
+      assert.strictEqual(
+        position,
+        expectedPosition,
+        `${reason} (${position}|${expectedPosition})`
+      )
     }
     if (buffer.length > 0) {
       yield buffer
@@ -97,6 +97,7 @@
     "moment-timezone": "^0.5.14",
     "ms": "^2.1.1",
     "multikey-hash": "^1.0.4",
+    "multiparty": "^4.2.2",
     "ndjson": "^2.0.0",
     "openpgp": "^4.10.4",
     "otplib": "^11.0.0",
@@ -1,5 +1,7 @@
+import * as multiparty from 'multiparty'
 import createLogger from '@xen-orchestra/log'
 import defer from 'golike-defer'
+import getStream from 'get-stream'
 import pump from 'pump'
 import { format } from 'json-rpc-peer'
 import { noSuchObject } from 'xo-common/api-errors'
@@ -153,8 +155,14 @@ importContent.resolve = {
   vdi: ['id', ['VDI'], 'operate'],
 }

-// -------------------------------------------------------------------
-
+/**
+ * here we expect to receive a POST in multipart/form-data
+ * When importing a VMDK file:
+ * - The first parts are the tables in uint32 LE
+ * - grainLogicalAddressList : uint32 LE in VMDK blocks
+ * - grainFileOffsetList : uint32 LE in sectors, limits the biggest VMDK size to 2^41B (2^32 * 512B)
+ * - the last part is the vmdk file.
+ */
 async function handleImport(
   req,
   res,
@@ -163,33 +171,59 @@ async function handleImport(
   req.setTimeout(43200000) // 12 hours
   req.length = req.headers['content-length']
   let vhdStream, size
-  if (type === 'vmdk') {
-    vhdStream = await vmdkToVhd(
-      req,
-      vmdkData.grainLogicalAddressList,
-      vmdkData.grainFileOffsetList
-    )
-    size = vmdkData.capacity
-  } else if (type === 'vhd') {
-    vhdStream = req
-    const footer = await peekFooterFromVhdStream(req)
-    size = footer.currentSize
-  } else {
-    throw new Error(`Unknown disk type, expected "vhd" or "vmdk", got ${type}`)
-  }
-  const vdi = await xapi.createVdi({
-    name_description: description,
-    name_label: name,
-    size,
-    sr: srId,
+  await new Promise((resolve, reject) => {
+    const promises = []
+    const form = new multiparty.Form()
+    form.on('error', reject)
+    form.on('part', async part => {
+      if (part.name !== 'file') {
+        promises.push(
+          (async () => {
+            const view = new DataView((await getStream.buffer(part)).buffer)
+            const result = new Uint32Array(view.byteLength / 4)
+            for (const i in result) {
+              result[i] = view.getUint32(i * 4, true)
+            }
+            vmdkData[part.name] = result
+          })()
+        )
+      } else {
+        await Promise.all(promises)
+        part.length = part.byteCount
+        if (type === 'vmdk') {
+          vhdStream = await vmdkToVhd(
+            part,
+            vmdkData.grainLogicalAddressList,
+            vmdkData.grainFileOffsetList
+          )
+          size = vmdkData.capacity
+        } else if (type === 'vhd') {
+          vhdStream = part
+          const footer = await peekFooterFromVhdStream(vhdStream)
+          size = footer.currentSize
+        } else {
+          throw new Error(
+            `Unknown disk type, expected "vhd" or "vmdk", got ${type}`
+          )
+        }
+        const vdi = await xapi.createVdi({
+          name_description: description,
+          name_label: name,
+          size,
+          sr: srId,
+        })
+        try {
+          await xapi.importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
+          res.end(format.response(0, vdi.$id))
+        } catch (e) {
+          await xapi.deleteVdi(vdi)
+          throw e
+        }
+        resolve()
+      }
+    })
+    form.parse(req)
   })
-  try {
-    await xapi.importVdiContent(vdi, vhdStream, VDI_FORMAT_VHD)
-    res.end(format.response(0, vdi.$id))
-  } catch (e) {
-    await xapi.deleteVdi(vdi)
-    throw e
-  }
 }

 // type is 'vhd' or 'vmdk'
@@ -218,23 +252,6 @@ importDisk.params = {
     optional: true,
     properties: {
       capacity: { type: 'integer' },
-      grainLogicalAddressList: {
-        description:
-          'virtual address of the blocks on the disk (LBA), in order encountered in the VMDK',
-        type: 'array',
-        items: {
-          type: 'integer',
-        },
-      },
-      grainFileOffsetList: {
-        description:
-          'offset of the grains in the VMDK file, in order encountered in the VMDK',
-        optional: true,
-        type: 'array',
-        items: {
-          type: 'integer',
-        },
-      },
     },
   },
 }
@@ -1,5 +1,7 @@
+import * as multiparty from 'multiparty'
 import asyncMap from '@xen-orchestra/async-map'
 import defer from 'golike-defer'
+import getStream from 'get-stream'
 import { createLogger } from '@xen-orchestra/log'
 import { format } from 'json-rpc-peer'
 import { ignoreErrors } from 'promise-toolbox'
@@ -1344,12 +1346,50 @@ export { export_ as export }

 // -------------------------------------------------------------------

+/**
+ * here we expect to receive a POST in multipart/form-data
+ * When importing an OVA file:
+ * - The first parts are the tables in uint32 LE
+ * - grainLogicalAddressList : uint32 LE in VMDK blocks
+ * - grainFileOffsetList : uint32 LE in sectors, limits the biggest VMDK size to 2^41B (2^32 * 512B)
+ * - the last part is the ova file.
+ */
 async function handleVmImport(req, res, { data, srId, type, xapi }) {
   // Timeout seems to be broken in Node 4.
   // See https://github.com/nodejs/node/issues/3319
   req.setTimeout(43200000) // 12 hours
-  const vm = await xapi.importVm(req, { data, srId, type })
-  res.end(format.response(0, vm.$id))
+  await new Promise((resolve, reject) => {
+    const form = new multiparty.Form()
+    const promises = []
+    const tables = {}
+    form.on('error', reject)
+    form.on('part', async part => {
+      if (part.name !== 'file') {
+        promises.push(
+          (async () => {
+            if (!(part.filename in tables)) {
+              tables[part.filename] = {}
+            }
+            const view = new DataView((await getStream.buffer(part)).buffer)
+            const result = new Uint32Array(view.byteLength / 4)
+            for (const i in result) {
+              result[i] = view.getUint32(i * 4, true)
+            }
+            tables[part.filename][part.name] = result
+            data.tables = tables
+          })()
+        )
+      } else {
+        await Promise.all(promises)
+        // XVA files are directly sent to xcp-ng who wants a content-length
+        part.length = part.byteCount
+        const vm = await xapi.importVm(part, { data, srId, type })
+        res.end(format.response(0, vm.$id))
+        resolve()
+      }
+    })
+    form.parse(req)
+  })
 }

 // TODO: "sr_id" can be passed in URL to target a specific SR
@@ -8,6 +8,14 @@ export {
   readCapacityAndGrainTable,
 } from './vmdk-read-table'

+/**
+ *
+ * @param vmdkReadStream
+ * @param grainLogicalAddressList iterable of LBAs in VMDK grain size
+ * @param grainFileOffsetList iterable of offsets in sectors (512 bytes)
+ * @param gzipped
+ * @returns a stream whose bytes represent a VHD file containing the VMDK data
+ */
 async function vmdkToVhd(
   vmdkReadStream,
   grainLogicalAddressList,
@@ -278,6 +278,8 @@ export async function parseOVAFile(
     if (header === null) {
       break
     }
+    const fileSlice = parsableFile.slice(offset, offset + header.fileSize)
+    fileSlice.fileName = header.fileName
     if (
       !(
         header.fileName.startsWith('PaxHeader/') ||
@@ -285,23 +287,19 @@ export async function parseOVAFile(
       )
     ) {
       if (header.fileName.toLowerCase().endsWith('.ovf')) {
-        const res = await parseOVF(
-          parsableFile.slice(offset, offset + header.fileSize),
-          stringDeserializer
-        )
+        const res = await parseOVF(fileSlice, stringDeserializer)
         data = { ...data, ...res }
       }
       if (!skipVmdk && header.fileName.toLowerCase().endsWith('.vmdk')) {
-        const fileSlice = parsableFile.slice(offset, offset + header.fileSize)
         const readFile = async (start, end) =>
           fileSlice.slice(start, end).read()
+        readFile.fileName = header.fileName
         data.tables[header.fileName] = suppressUnhandledRejection(
           readVmdkGrainTable(readFile)
         )
       }
     }
     if (!skipVmdk && header.fileName.toLowerCase().endsWith('.vmdk.gz')) {
-      const fileSlice = parsableFile.slice(offset, offset + header.fileSize)
       let forwardsInflater = new pako.Inflate()

       const readFile = async (start, end) => {
@@ -357,6 +355,7 @@ export async function parseOVAFile(
           return parseGzipFromEnd(start, end, fileSlice, header)
         }
       }
+      readFile.fileName = header.fileName
       data.tables[header.fileName] = suppressUnhandledRejection(
         readVmdkGrainTable(readFile)
       )
@@ -67,9 +67,9 @@ async function grabTables(
 }

 /***
- *
+ * the tables are encoded in uint32 LE
  * @param fileAccessor: (start, end) => ArrayBuffer
- * @returns {Promise<{capacityBytes: number, tablePromise: Promise<{ grainLogicalAddressList: [number], grainFileOffsetList: [number] }>}>}
+ * @returns {Promise<{capacityBytes: number, tablePromise: Promise<{ grainLogicalAddressList: ArrayBuffer, grainFileOffsetList: ArrayBuffer }>}>}
  */
 export async function readCapacityAndGrainTable(fileAccessor) {
   let headerBuffer = await fileAccessor(0, HEADER_SIZE)
@@ -128,16 +128,21 @@ export async function readCapacityAndGrainTable(fileAccessor) {
     }
   }
   extractedGrainTable.sort(
-    ([i1, grainAddress1], [_i2, grainAddress2]) =>
+    ([_i1, grainAddress1], [_i2, grainAddress2]) =>
       grainAddress1 - grainAddress2
   )
-  const fragmentAddressList = extractedGrainTable.map(
-    ([index, _grainAddress]) => index * grainSizeByte
-  )
-  const grainFileOffsetList = extractedGrainTable.map(
-    ([_index, grainAddress]) => grainAddress * SECTOR_SIZE
-  )
-  return { grainLogicalAddressList: fragmentAddressList, grainFileOffsetList }
+  const byteLength = 4 * extractedGrainTable.length
+  const grainLogicalAddressList = new DataView(new ArrayBuffer(byteLength))
+  const grainFileOffsetList = new DataView(new ArrayBuffer(byteLength))
+  extractedGrainTable.forEach(([index, grainAddress], i) => {
+    grainLogicalAddressList.setUint32(i * 4, index, true)
+    grainFileOffsetList.setUint32(i * 4, grainAddress, true)
+  })
+  return {
+    grainLogicalAddressList: grainLogicalAddressList.buffer,
+    grainFileOffsetList: grainFileOffsetList.buffer,
+  }
 }

   return {
@@ -192,7 +192,7 @@ export default class VMDKDirectParser {
     const position = this.virtualBuffer.position
     const sector = await this.virtualBuffer.readChunk(
       SECTOR_SIZE,
-      'marker start ' + position
+      `marker starting at ${position}`
     )
     const marker = parseMarker(sector)
     if (marker.size === 0) {
@@ -203,7 +203,9 @@ export default class VMDKDirectParser {
       const remainOfBufferSize = alignedGrainDiskSize - SECTOR_SIZE
       const remainderOfGrainBuffer = await this.virtualBuffer.readChunk(
         remainOfBufferSize,
-        'grain remainder ' + this.virtualBuffer.position
+        `grain remainder ${this.virtualBuffer.position} -> ${
+          this.virtualBuffer.position + remainOfBufferSize
+        }`
       )
       const grainBuffer = Buffer.concat([sector, remainderOfGrainBuffer])
       const grainObject = readGrain(
@@ -224,13 +226,12 @@ export default class VMDKDirectParser {
       tableIndex++
     ) {
       const position = this.virtualBuffer.position
-      const grainPosition = this.grainFileOffsetList[tableIndex]
-      const grainSizeBytes = this.header.grainSizeSectors * 512
-      const lba = this.grainLogicalAddressList[tableIndex]
-      // console.log('VMDK before blank', position, grainPosition,'lba', lba, 'tableIndex', tableIndex, 'grainFileOffsetList.length', this.grainFileOffsetList.length)
+      const grainPosition = this.grainFileOffsetList[tableIndex] * SECTOR_SIZE
+      const grainSizeBytes = this.header.grainSizeSectors * SECTOR_SIZE
+      const lba = this.grainLogicalAddressList[tableIndex] * grainSizeBytes
       await this.virtualBuffer.readChunk(
         grainPosition - position,
-        'blank before ' + position
+        `blank from ${position} to ${grainPosition}`
       )
       let grain
       if (this.header.flags.hasMarkers) {
@@ -1577,14 +1577,21 @@ export const importVm = async (file, type = 'xva', data = undefined, sr) => {
   const { name } = file

   info(_('startVmImport'), name)
+  const formData = new FormData()
   if (data !== undefined && data.tables !== undefined) {
     for (const k in data.tables) {
-      data.tables[k] = await data.tables[k]
+      const tables = await data.tables[k]
+      delete data.tables[k]
+      for (const l in tables) {
+        const blob = new Blob([tables[l]])
+        formData.append(l, blob, k)
+      }
     }
   }
   return _call('vm.import', { type, data, sr: resolveId(sr) }).then(
-    ({ $sendTo }) =>
-      post($sendTo, file)
+    async ({ $sendTo }) => {
+      formData.append('file', file)
+      return post($sendTo, formData)
         .then(res => {
           if (res.status !== 200) {
             throw res.status
@@ -1596,6 +1603,7 @@ export const importVm = async (file, type = 'xva', data = undefined, sr) => {
           error(_('vmImportFailed'), name)
           throw err
         })
+    }
   )
 }

@@ -1641,6 +1649,15 @@ export const importVms = (vms, sr) =>
   ).then(ids => ids.filter(_ => _ !== undefined))

 const importDisk = async ({ description, file, name, type, vmdkData }, sr) => {
+  const formData = new FormData()
+  if (vmdkData !== undefined) {
+    for (const l of ['grainLogicalAddressList', 'grainFileOffsetList']) {
+      const table = await vmdkData[l]
+      delete vmdkData[l]
+      const blob = new Blob([table])
+      formData.append(l, blob, file.name)
+    }
+  }
   const res = await _call('disk.import', {
     description,
     name,
@@ -1648,11 +1665,11 @@ const importDisk = async ({ description, file, name, type, vmdkData }, sr) => {
     type,
     vmdkData,
   })
-  const result = await post(res.$sendTo, file)
+  formData.append('file', file)
+  const result = await post(res.$sendTo, formData)
   if (result.status !== 200) {
     throw result.status
   }
-  success(_('diskImportSuccess'), name)
   const body = await result.json()
   await body.result
 }
yarn.lock (29 changed lines)
@@ -9174,6 +9174,17 @@ http-errors@~1.6.2:
     setprototypeof "1.1.0"
     statuses ">= 1.4.0 < 2"

+http-errors@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.8.0.tgz#75d1bbe497e1044f51e4ee9e704a62f28d336507"
+  integrity sha512-4I8r0C5JDhT5VkvI47QktDW75rNlGVsUf/8hzjCC/wkWI/jdTRmBb9aI7erSG82r1bjKY3F6k28WnsVxB1C73A==
+  dependencies:
+    depd "~1.1.2"
+    inherits "2.0.4"
+    setprototypeof "1.2.0"
+    statuses ">= 1.5.0 < 2"
+    toidentifier "1.0.0"
+
 http-parser-js@>=0.5.1:
   version "0.5.2"
   resolved "https://registry.yarnpkg.com/http-parser-js/-/http-parser-js-0.5.2.tgz#da2e31d237b393aae72ace43882dd7e270a8ff77"
@@ -12615,6 +12626,15 @@ multileveldown@^3.0.0:
     protocol-buffers-encodings "^1.1.0"
     reachdown "^1.0.0"

+multiparty@^4.2.2:
+  version "4.2.2"
+  resolved "https://registry.yarnpkg.com/multiparty/-/multiparty-4.2.2.tgz#bee5fb5737247628d39dab4979ffd6d57bf60ef6"
+  integrity sha512-NtZLjlvsjcoGrzojtwQwn/Tm90aWJ6XXtPppYF4WmOk/6ncdwMMKggFY2NlRRN9yiCEIVxpOfPWahVEG2HAG8Q==
+  dependencies:
+    http-errors "~1.8.0"
+    safe-buffer "5.2.1"
+    uid-safe "2.1.5"
+
 multipipe@^0.1.2:
   version "0.1.2"
   resolved "https://registry.yarnpkg.com/multipipe/-/multipipe-0.1.2.tgz#2a8f2ddf70eed564dff2d57f1e1a137d9f05078b"
@@ -16073,7 +16093,7 @@ safe-buffer@5.2.0:
   resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.0.tgz#b74daec49b1148f88c64b68d49b1e815c1f2f519"
   integrity sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg==

-safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@~5.2.0:
+safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@^5.2.0, safe-buffer@~5.2.0:
   version "5.2.1"
   resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6"
   integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==
@@ -16330,6 +16350,11 @@ setprototypeof@1.1.1:
   resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.1.1.tgz#7e95acb24aa92f5885e0abef5ba131330d4ae683"
   integrity sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==

+setprototypeof@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424"
+  integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==
+
 sha.js@^2.4.0, sha.js@^2.4.8, sha.js@~2.4.4:
   version "2.4.11"
   resolved "https://registry.yarnpkg.com/sha.js/-/sha.js-2.4.11.tgz#37a5cf0b81ecbc6943de109ba2960d1b26584ae7"
@@ -17939,7 +17964,7 @@ uglify-to-browserify@~1.0.0:
   resolved "https://registry.yarnpkg.com/uglify-to-browserify/-/uglify-to-browserify-1.0.2.tgz#6e0924d6bda6b5afe349e39a6d632850a0f882b7"
   integrity sha1-bgkk1r2mta/jSeOabWMoUKD4grc=

-uid-safe@~2.1.5:
+uid-safe@2.1.5, uid-safe@~2.1.5:
   version "2.1.5"
   resolved "https://registry.yarnpkg.com/uid-safe/-/uid-safe-2.1.5.tgz#2b3d5c7240e8fc2e58f8aa269e5ee49c0857bd3a"
   integrity sha512-KPHm4VL5dDXKz01UuEd88Df+KzynaohSL9fBh096KWAxSKZQDI2uBrVqtvRM4rwrIrRRKsdLNML/lnaaVSRioA==