fix(xo-server): import OVA with broken VMDK size in metadata (#6824)
OVAs generated by Oracle Virtualization Server seem to store the size of the VMDK file instead of the disk size in the metadata. This causes the transfer to fail when the import tries to write data beyond the size of the VMDK — for example, a 50GB disk may produce a 10GB VMDK, and the import fails when it reaches data in the 10-50GB range.
This commit is contained in:
committed by
GitHub
parent
77b166bb3b
commit
1b0ec9839e
@@ -9,6 +9,7 @@
|
||||
|
||||
- [Proxy] Make proxy address editable (PR [#6816](https://github.com/vatesfr/xen-orchestra/pull/6816))
|
||||
- [Home/Host] Displays a warning for hosts with HVM disabled [#6823](https://github.com/vatesfr/xen-orchestra/issues/6823) (PR [#6834](https://github.com/vatesfr/xen-orchestra/pull/6834))
|
||||
- [OVA import] Workaround for OVA generated by Oracle VM with faulty size in metadata [#6824](https://github.com/vatesfr/xen-orchestra/issues/6824)
|
||||
|
||||
### Bug fixes
|
||||
|
||||
@@ -32,8 +33,10 @@
|
||||
|
||||
<!--packages-start-->
|
||||
|
||||
- xo-vmdk-to-vhd patch
|
||||
- @xen-orchestra/mixins patch
|
||||
- xo-cli minor
|
||||
- xo-server minor
|
||||
- xo-web minor
|
||||
|
||||
<!--packages-end-->
|
||||
|
||||
@@ -711,30 +711,12 @@ export default class Xapi extends XapiBase {
|
||||
throw operationFailed({ objectId: vm.id, code: 'TOO_MANY_VIFs' })
|
||||
}
|
||||
await Promise.all(
|
||||
map(disks, async disk => {
|
||||
const vdi = (vdis[disk.path] = await this._getOrWaitObject(
|
||||
await this.VDI_create({
|
||||
name_description: disk.descriptionLabel,
|
||||
name_label: disk.nameLabel,
|
||||
SR: sr.$ref,
|
||||
virtual_size: disk.capacity,
|
||||
})
|
||||
))
|
||||
$defer.onFailure(() => vdi.$destroy())
|
||||
compression[disk.path] = disk.compression
|
||||
return this.VBD_create({
|
||||
userdevice: String(disk.position),
|
||||
VDI: vdi.$ref,
|
||||
map(networks, (networkId, i) =>
|
||||
this.VIF_create({
|
||||
device: vifDevices[i],
|
||||
network: this.getObject(networkId).$ref,
|
||||
VM: vm.$ref,
|
||||
})
|
||||
}).concat(
|
||||
map(networks, (networkId, i) =>
|
||||
this.VIF_create({
|
||||
device: vifDevices[i],
|
||||
network: this.getObject(networkId).$ref,
|
||||
VM: vm.$ref,
|
||||
})
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
@@ -747,9 +729,9 @@ export default class Xapi extends XapiBase {
|
||||
extract.on('finish', resolve)
|
||||
extract.on('error', reject)
|
||||
extract.on('entry', async (entry, stream, cb) => {
|
||||
// Not a disk to import.
|
||||
const vdi = vdis[entry.name]
|
||||
if (!vdi) {
|
||||
const diskMetadata = disks.find(({ path }) => path === entry.name)
|
||||
// Not a disk to import
|
||||
if (!diskMetadata) {
|
||||
stream.on('end', cb)
|
||||
stream.resume()
|
||||
return
|
||||
@@ -762,7 +744,26 @@ export default class Xapi extends XapiBase {
|
||||
compression[entry.name] === 'gzip',
|
||||
entry.size
|
||||
)
|
||||
|
||||
try {
|
||||
// vmdk size can be wrong in ova
|
||||
// we use the size in the vmdk descriptor to create the vdi
|
||||
const vdi = (vdis[diskMetadata.path] = await this._getOrWaitObject(
|
||||
await this.VDI_create({
|
||||
name_description: diskMetadata.descriptionLabel,
|
||||
name_label: diskMetadata.nameLabel,
|
||||
SR: sr.$ref,
|
||||
virtual_size: vhdStream._rawLength,
|
||||
})
|
||||
))
|
||||
$defer.onFailure(() => vdi.$destroy())
|
||||
compression[diskMetadata.path] = diskMetadata.compression
|
||||
await this.VBD_create({
|
||||
userdevice: String(diskMetadata.position),
|
||||
VDI: vdi.$ref,
|
||||
VM: vm.$ref,
|
||||
})
|
||||
|
||||
await vdi.$importContent(vhdStream, { format: VDI_FORMAT_VHD })
|
||||
// See: https://github.com/mafintosh/tar-stream#extracting
|
||||
// No import parallelization.
|
||||
|
||||
@@ -19,12 +19,14 @@ export { default as readVmdkGrainTable, readCapacityAndGrainTable } from './vmdk
|
||||
async function vmdkToVhd(vmdkReadStream, grainLogicalAddressList, grainFileOffsetList, gzipped = false, length) {
|
||||
const parser = new VMDKDirectParser(vmdkReadStream, grainLogicalAddressList, grainFileOffsetList, gzipped, length)
|
||||
const header = await parser.readHeader()
|
||||
return createReadableSparseStream(
|
||||
const vhdStream = await createReadableSparseStream(
|
||||
header.capacitySectors * 512,
|
||||
header.grainSizeSectors * 512,
|
||||
grainLogicalAddressList,
|
||||
parser.blockIterator()
|
||||
)
|
||||
vhdStream._rawLength = parser.descriptor.extents[0].size
|
||||
return vhdStream
|
||||
}
|
||||
|
||||
export async function computeVmdkLength(diskName, vhdReadStream) {
|
||||
|
||||
@@ -25,6 +25,7 @@ function parseDescriptor(descriptorSlice) {
|
||||
extentList.push({
|
||||
access: items[0],
|
||||
sizeSectors: items[1],
|
||||
size: items[1] * 512,
|
||||
type: items[2],
|
||||
name: items[3],
|
||||
offset: items.length > 4 ? items[4] : 0,
|
||||
|
||||
Reference in New Issue
Block a user