feat(xo-server): VM_destroy instead of deleteVm (#5693)
Continuation of 5f1c127
parent 22ba1302d2
commit cb52a8b51b
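
Every remaining caller of the legacy `deleteVm` helper now goes through the lower-level `VM_destroy` method (which takes a XAPI opaque reference) or through `$destroy()` on the object returned by `getXapiObject`, and the deprecated `deleteVm` wrapper plus its Flow declaration are removed from the Xapi class. A minimal sketch of the calling-convention change, using names taken from the hunks below:

// Sketch only: `xapi`, `app`, `vm`, `vmUuid` and the positional flags mirror
// the call sites in this diff; adapt them to the surrounding code.

// Before: the deprecated helper accepted an id (or object) plus positional flags.
await xapi.deleteVm(vm._xapiId, deleteDisks, force, forceDeleteDefaultTemplate)

// After: call sites pass the XAPI opaque reference directly to VM_destroy,
await xapi.VM_destroy(vm._xapiRef, deleteDisks, force, forceDeleteDefaultTemplate)

// or, when only a UUID is available, resolve the object and call $destroy().
await app.getXapiObject(vmUuid).$destroy()
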
@@ -64,7 +64,7 @@ export async function destroy({ sr }) {
 const config = xapi.xo.getData(sr, 'xosan_config')
 // we simply forget because the hosted disks are being destroyed with the VMs
 await xapi.forgetSr(sr._xapiId)
- await asyncMapSettled(config.nodes, node => xapi.deleteVm(node.vm.id))
+ await asyncMapSettled(config.nodes, node => this.getXapiObject(node.vm.id).$destroy())
 await xapi.deleteNetwork(config.network)
 if (sr.SR_type === 'xosan') {
 await this.unbindXosanLicense({ srId: sr.id })
@@ -60,7 +60,7 @@ export async function copyVm({ vm, sr }) {
 console.log('export full VM...')
 const input = await srcXapi.exportVm(vm)
 console.log('import full VM...')
- await tgtXapi.deleteVm(await tgtXapi.importVm(input, { srId: sr }))
+ await tgtXapi.VM_destroy((await tgtXapi.importVm(input, { srId: sr })).$ref)
 }

 // delta
@@ -72,7 +72,7 @@ export async function copyVm({ vm, sr }) {
 srId: sr,
 })
 console.log('transfered size:', transferSize)
- await tgtXapi.deleteVm(copyVm)
+ await tgtXapi.VM_destroy(copyVm.$ref)
 }
 }

@@ -152,7 +152,7 @@ export const create = defer(async function ($defer, params) {
 }

 const xapiVm = await xapi.createVm(template._xapiId, params, checkLimits)
- $defer.onFailure(() => xapi.deleteVm(xapiVm.$id, true, true))
+ $defer.onFailure(() => xapi.VM_destroy(xapiVm.$ref, true, true))

 const vm = xapi.xo.addObject(xapiVm)

@@ -385,7 +385,7 @@ const delete_ = defer(async function (
 }
 })

- return xapi.deleteVm(vm._xapiId, deleteDisks, force, forceDeleteDefaultTemplate)
+ return xapi.VM_destroy(vm._xapiRef, deleteDisks, force, forceDeleteDefaultTemplate)
 })

 delete_.params = {
@@ -664,11 +664,11 @@ export const clone = defer(async function ($defer, { vm, name, full_copy: fullCo
 await checkPermissionOnSrs.call(this, vm)
 const xapi = this.getXapi(vm)

- const { $id: cloneId } = await xapi.cloneVm(vm._xapiRef, {
+ const { $id: cloneId, $ref: cloneRef } = await xapi.cloneVm(vm._xapiRef, {
 nameLabel: name,
 fast: !fullCopy,
 })
- $defer.onFailure(() => xapi.deleteVm(cloneId))
+ $defer.onFailure(() => xapi.VM_destroy(cloneRef))

 const isAdmin = this.user.permission === 'admin'
 if (!isAdmin) {
@@ -786,10 +786,10 @@ export const snapshot = defer(async function (
 }

 const xapi = this.getXapi(vm)
- const { $id: snapshotId } = await (saveMemory
+ const { $id: snapshotId, $ref: snapshotRef } = await (saveMemory
 ? xapi.checkpointVm(vm._xapiRef, name)
 : xapi.snapshotVm(vm._xapiRef, name))
- $defer.onFailure(() => xapi.deleteVm(snapshotId))
+ $defer.onFailure(() => xapi.VM_destroy(snapshotRef))

 if (description !== undefined) {
 await xapi.editVm(snapshotId, { name_description: description })
@@ -585,10 +585,10 @@ export const createSR = defer(async function (
 const firstSr = srsObjects[0]
 CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 1 }
 const firstVM = await this::_importGlusterVM(xapi, template, firstSr)
- $defer.onFailure(() => xapi.deleteVm(firstVM, true))
+ $defer.onFailure(() => xapi.VM_destroy(firstVM.$ref, true))
 CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 2 }
 const copiedVms = await asyncMapSettled(srsObjects.slice(1), sr =>
- copyVm(xapi, firstVM, sr)::tap(({ vm }) => $defer.onFailure(() => xapi.deleteVm(vm)))
+ copyVm(xapi, firstVM, sr)::tap(({ vm }) => $defer.onFailure(() => xapi.VM_destroy(vm.$ref)))
 )
 const vmsAndSrs = [
 {
@@ -602,7 +602,7 @@ export const createSR = defer(async function (
 const sr = firstSr
 const arbiterIP = networkPrefix + vmIpLastNumber++
 const arbiterVm = await xapi.copyVm(firstVM, sr)
- $defer.onFailure(() => xapi.deleteVm(arbiterVm, true))
+ $defer.onFailure(() => xapi.VM_destroy(arbiterVm.$ref, true))
 arbiter = await _prepareGlusterVm(xapi, sr, arbiterVm, xosanNetwork, arbiterIP, {
 labelSuffix: '_arbiter',
 increaseDataDisk: false,
@@ -870,7 +870,7 @@ export async function replaceBrick({ xosansr, previousBrick, newLvmSr, brickSize
 await xapi.xo.setData(xosansr, 'xosan_config', data)
 CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 2 }
 if (previousVMEntry) {
- await xapi.deleteVm(previousVMEntry.vm, true)
+ await xapi.VM_destroy(previousVMEntry.vm.$ref, true)
 }
 CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 3 }
 await xapi.callAsync('SR.scan', xapi.getObject(xosansr).$ref)
@@ -1013,7 +1013,7 @@ const insertNewGlusterVm = defer(async function (
 const srObject = xapi.getObject(lvmsrId)
 // can't really copy an existing VM, because existing gluster VMs disks might too large to be copied.
 const newVM = await this::_importGlusterVM(xapi, data.template, lvmsrId)
- $defer.onFailure(() => xapi.deleteVm(newVM, true))
+ $defer.onFailure(() => xapi.VM_destroy(newVM.$ref, true))
 const addressAndHost = await _prepareGlusterVm(xapi, srObject, newVM, xosanNetwork, ipAddress, {
 labelSuffix,
 increaseDataDisk,
@@ -1051,7 +1051,7 @@ export const addBricks = defer(async function ($defer, { xosansr, lvmsrs, brickS
 newAddresses.push(ipAddress)
 const { newVM, addressAndHost } = await this::insertNewGlusterVm(xapi, xosansr, newSr, { ipAddress, brickSize })
 $defer.onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + ipAddress, true))
- $defer.onFailure(() => xapi.deleteVm(newVM, true))
+ $defer.onFailure(() => xapi.VM_destroy(newVM.$ref, true))
 const brickName = addressAndHost.brickName
 newNodes.push({
 brickName,
@@ -1070,7 +1070,7 @@ export const addBricks = defer(async function ($defer, { xosansr, lvmsrs, brickS
 data.type = 'replica'
 await xapi.xo.setData(xosansr, 'xosan_config', data)
 await glusterCmd(glusterEndpoint, 'peer detach ' + arbiterNode.vm.ip, true)
- await xapi.deleteVm(arbiterNode.vm.id, true)
+ await xapi.VM_destroy(await xapi.call('VM.get_by_uuid', arbiterNode.vm.id), true)
 }
 CURRENT_POOL_OPERATIONS[poolId] = { ...OPERATION_OBJECT, state: 1 }
 await glusterCmd(glusterEndpoint, `volume add-brick xosan ${newNodes.map(n => n.brickName).join(' ')}`)
@@ -1123,7 +1123,7 @@ export const removeBricks = defer(async function ($defer, { xosansr, bricks }) {
 remove(data.nodes, node => ips.includes(node.vm.ip))
 await xapi.xo.setData(xosansr.id, 'xosan_config', data)
 await xapi.callAsync('SR.scan', xapi.getObject(xosansr._xapiId).$ref)
- await asyncMapSettled(brickVMs, vm => xapi.deleteVm(vm.vm, true))
+ await asyncMapSettled(brickVMs, vm => xapi.VM_destroy(vm.vm.$ref, true))
 } finally {
 delete CURRENT_POOL_OPERATIONS[xapi.pool.$id]
 }
@@ -563,15 +563,6 @@ export default class Xapi extends XapiBase {
 )
 }

- /**
-  * @deprecated Use VM_destroy instead
-  */
- async deleteVm(vmOrId, deleteDisks = true, force = false, forceDeleteDefaultTemplate = false) {
- const $ref = typeof vmOrId === 'string' ? this.getObject(vmOrId).$ref : vmOrId.$ref
-
- return this.VM_destroy($ref, { deleteDisks, force, forceDeleteDefaultTemplate })
- }
-
 getVmConsole(vmId) {
 const vm = this.getObject(vmId)

@@ -75,7 +75,6 @@ declare export class Xapi {

 barrier(): Promise<void>;
 barrier(ref: string): Promise<XapiObject>;
- deleteVm(vm: Id): Promise<void>;
 editVm(vm: Id, $Dict<mixed>): Promise<void>;
 exportDeltaVm(
 cancelToken: mixed,
@@ -79,7 +79,7 @@ export default {

 // Clones the template.
 const vmRef = await this[clone ? '_cloneVm' : '_copyVm'](template, nameLabel)
- $defer.onFailure(() => this.deleteVm(vmRef))
+ $defer.onFailure(() => this.VM_destroy(vmRef))

 // Copy BIOS strings
 // https://support.citrix.com/article/CTX230618
@@ -374,7 +374,7 @@ export default class {
 bypassVdiChainsCheck: force,
 snapshotNameLabel: `XO_DELTA_EXPORT: ${targetSr.name_label} (${targetSr.uuid})`,
 })
- $defer.onFailure(() => srcXapi.deleteVm(delta.vm.uuid))
+ $defer.onFailure(() => this._xo.getXapiObject(delta.vm.uuid).$destroy())
 $defer.onFailure(cancel)

 const date = safeDateFormat(Date.now())
@@ -400,11 +400,11 @@ export default class {
 // Once done, (asynchronously) remove the (now obsolete) local
 // base.
 if (localBaseUuid) {
- promise.then(() => srcXapi.deleteVm(localBaseUuid))::ignoreErrors()
+ promise.then(() => this._xo.getXapiObject(localBaseUuid).$destroy())::ignoreErrors()
 }

 if (toRemove !== undefined) {
- promise.then(() => asyncMapSettled(toRemove, _ => targetXapi.deleteVm(_.$id)))::ignoreErrors()
+ promise.then(() => asyncMapSettled(toRemove, _ => targetXapi.VM_destroy(_.$ref)))::ignoreErrors()
 }

 // (Asynchronously) Identify snapshot as future base.
@@ -591,7 +591,7 @@ export default class {
 )
 const baseVm = bases.pop()
 forEach(bases, base => {
- xapi.deleteVm(base.$id)::ignoreErrors()
+ xapi.VM_destroy(base.$ref)::ignoreErrors()
 })

 // Check backup dirs.
@@ -621,7 +621,8 @@ export default class {
 fullVdisRequired,
 disableBaseTags: true,
 })
- $defer.onFailure(() => xapi.deleteVm(delta.vm.uuid))
+ const exportedVmRef = await xapi.call('VM.get_by_uuid', delta.vm.uuid)
+ $defer.onFailure(() => xapi.VM_destroy(exportedVmRef))
 $defer.onFailure(cancel)

 // Save vdis.
@@ -711,7 +712,7 @@ export default class {
 await this._removeOldDeltaVmBackups(xapi, { vm, handler, dir, retention })

 if (baseVm) {
- xapi.deleteVm(baseVm.$id)::ignoreErrors()
+ xapi.VM_destroy(baseVm.$ref)::ignoreErrors()
 }

 return {
@@ -837,7 +838,7 @@ export default class {
 const promises = []
 for (let surplus = snapshots.length - (retention - 1); surplus > 0; surplus--) {
 const oldSnap = snapshots.shift()
- promises.push(xapi.deleteVm(oldSnap.uuid))
+ promises.push(xapi.VM_destroy(oldSnap.$ref))
 }
 await Promise.all(promises)
 }
@@ -846,7 +847,7 @@ export default class {
 return Promise.all(
 mapToArray(vms, vm =>
 // Do not consider a failure to delete an old copy as a fatal error.
- xapi.deleteVm(vm.$id)::ignoreErrors()
+ xapi.VM_destroy(vm.$ref)::ignoreErrors()
 )
 )
 }
@@ -107,7 +107,7 @@ export default class Proxy {
 const { vmUuid } = await this._getProxy(id)
 if (vmUuid !== undefined) {
 try {
- await this._app.getXapi(vmUuid).deleteVm(vmUuid)
+ await this._app.getXapiObject(vmUuid).$destroy()
 } catch (error) {
 if (!noSuchObject.is(error)) {
 throw error
@@ -208,7 +208,7 @@ export default class Proxy {
 }),
 { srId }
 )
- $defer.onFailure(() => xapi._deleteVm(vm))
+ $defer.onFailure(() => xapi.VM_destroy(vm.$ref))

 const arg = { licenseId, boundObjectId: vm.uuid }
 await app.bindLicense(arg)
@@ -268,7 +268,7 @@ export default class Proxy {
 const { vmUuid } = await this._getProxy(proxyId)
 if (vmUuid !== undefined) {
 try {
- await app.getXapi(vmUuid).deleteVm(vmUuid)
+ await app.getXapiObject(vmUuid).$destroy()
 } catch (error) {
 if (!noSuchObject.is(error)) {
 throw error