chore(package): upgrade golike-defer to 0.4.1 (#632)

Julien Fontanet 2017-12-21 09:55:13 +01:00 committed by GitHub
parent 3ef08d3d5e
commit a6869638f5
6 changed files with 62 additions and 62 deletions
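
Note: this is not a drop-in version bump. golike-defer 0.4 replaces the `defer.onFailure()` / `@deferrable.onFailure` wrappers with plain `defer()` / `@deferrable`, which inject a `$defer` object whose `onFailure()` method registers the rollback callbacks previously passed to `$onFailure`. A minimal sketch of the wrapper-form migration, using hypothetical `createThing`/`deleteThing` helpers (not from this repo):

import defer from 'golike-defer'

// Hypothetical helpers, for illustration only.
const createThing = async id => ({ id })
const deleteThing = async thing => { /* release the resource */ }

// golike-defer 0.3.x, the form this commit removes:
//   const provision = defer.onFailure(async function ($onFailure, id) {
//     const thing = await createThing(id)
//     $onFailure(() => deleteThing(thing)) // rolled back if a later step throws
//     return thing
//   })

// golike-defer 0.4.x, the form this commit introduces:
const provision = defer(async function ($defer, id) {
  const thing = await createThing(id)
  // Same rollback-on-failure semantics, now registered through the injected $defer object.
  $defer.onFailure(() => deleteThing(thing))
  return thing
})

Callers are unchanged; only the wrapper and the name of the injected first parameter differ, which is why every call site below is a one-line rename.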

View File

@@ -62,7 +62,7 @@
     "fatfs": "^0.10.4",
     "from2": "^2.3.0",
     "fs-extra": "^5.0.0",
-    "golike-defer": "^0.3.0",
+    "golike-defer": "^0.4.1",
     "hashy": "^0.6.2",
     "helmet": "^3.9.0",
     "highland": "^2.11.1",

View File

@@ -308,7 +308,7 @@ async function glusterCmd (glusterEndpoint, cmd, ignoreError = false) {
   return result
 }

-const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure, xapi, pif, vlan, networkPrefix) {
+const createNetworkAndInsertHosts = defer(async function ($defer, xapi, pif, vlan, networkPrefix) {
   let hostIpLastNumber = HOST_FIRST_NUMBER
   const xosanNetwork = await xapi.createNetwork({
     name: 'XOSAN network',
@@ -317,7 +317,7 @@ const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure,
     mtu: pif.mtu,
     vlan: +vlan,
   })
-  $onFailure(() => xapi.deleteNetwork(xosanNetwork))
+  $defer.onFailure(() => xapi.deleteNetwork(xosanNetwork))
   const addresses = xosanNetwork.$PIFs.map(pif => ({pif, address: networkPrefix + (hostIpLastNumber++)}))
   await asyncMap(addresses, addressAndPif => reconfigurePifIP(xapi, addressAndPif.pif, addressAndPif.address))
   const master = xapi.pool.$master
@@ -354,10 +354,10 @@ async function getOrCreateSshKey (xapi) {
   return sshKey
 }

-const _probePoolAndWaitForPresence = defer.onFailure(async function ($onFailure, glusterEndpoint, addresses) {
+const _probePoolAndWaitForPresence = defer(async function ($defer, glusterEndpoint, addresses) {
   await asyncMap(addresses, async (address) => {
     await glusterCmd(glusterEndpoint, 'peer probe ' + address)
-    $onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + address, true))
+    $defer.onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + address, true))
   })
   function shouldRetry (peers) {
@@ -416,7 +416,7 @@ async function configureGluster (redundancy, ipAndHosts, glusterEndpoint, gluste
   await glusterCmd(glusterEndpoint, 'volume start xosan')
 }

-export const createSR = defer.onFailure(async function ($onFailure, {
+export const createSR = defer(async function ($defer, {
   template, pif, vlan, srs, glusterType,
   redundancy, brickSize = this::computeBrickSize(srs), memorySize = 2 * GIGABYTE, ipRange = DEFAULT_NETWORK_PREFIX + '.0',
 }) {
@@ -444,7 +444,7 @@ export const createSR = defer.onFailure(async function ($onFailure, {
   CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
   try {
     const xosanNetwork = await createNetworkAndInsertHosts(xapi, pif, vlan, networkPrefix)
-    $onFailure(() => xapi.deleteNetwork(xosanNetwork))
+    $defer.onFailure(() => xapi.deleteNetwork(xosanNetwork))
     const sshKey = await getOrCreateSshKey(xapi)
     const srsObjects = map(srs, srId => xapi.getObject(srId))
     await Promise.all(srsObjects.map(sr => callPlugin(xapi, sr.$PBDs[0].$host, 'receive_ssh_keys', {
@@ -456,11 +456,11 @@ export const createSR = defer.onFailure(async function ($onFailure, {
     const firstSr = srsObjects[0]
     CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
     const firstVM = await this::_importGlusterVM(xapi, template, firstSr)
-    $onFailure(() => xapi.deleteVm(firstVM, true))
+    $defer.onFailure(() => xapi.deleteVm(firstVM, true))
     CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
     const copiedVms = await asyncMap(srsObjects.slice(1), sr =>
       copyVm(xapi, firstVM, sr)::tap(({vm}) =>
-        $onFailure(() => xapi.deleteVm(vm))
+        $defer.onFailure(() => xapi.deleteVm(vm))
       )
     )
     const vmsAndSrs = [{
@@ -473,7 +473,7 @@ export const createSR = defer.onFailure(async function ($onFailure, {
       const sr = firstSr
       const arbiterIP = networkPrefix + (vmIpLastNumber++)
       const arbiterVm = await xapi.copyVm(firstVM, sr)
-      $onFailure(() => xapi.deleteVm(arbiterVm, true))
+      $defer.onFailure(() => xapi.deleteVm(arbiterVm, true))
       arbiter = await _prepareGlusterVm(xapi, sr, arbiterVm, xosanNetwork, arbiterIP, {
         labelSuffix: '_arbiter',
         increaseDataDisk: false,
@@ -498,7 +498,7 @@ export const createSR = defer.onFailure(async function ($onFailure, {
       'xosan', '', true, {})
     debug('sr created')
     // we just forget because the cleanup actions are stacked in the $onFailure system
-    $onFailure(() => xapi.forgetSr(xosanSrRef))
+    $defer.onFailure(() => xapi.forgetSr(xosanSrRef))
     if (arbiter) {
       ipAndHosts.push(arbiter)
     }
@@ -802,7 +802,7 @@ const _median = arr => {
   return arr[Math.floor(arr.length / 2)]
 }

-const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xosansr, lvmsrId, {
+const insertNewGlusterVm = defer(async function ($defer, xapi, xosansr, lvmsrId, {
   labelSuffix = '',
   glusterEndpoint = null, ipAddress = null, increaseDataDisk = true, brickSize = Infinity,
 }) {
@@ -822,7 +822,7 @@ const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xos
   const srObject = xapi.getObject(lvmsrId)
   // can't really copy an existing VM, because existing gluster VMs disks might too large to be copied.
   const newVM = await this::_importGlusterVM(xapi, data.template, lvmsrId)
-  $onFailure(() => xapi.deleteVm(newVM, true))
+  $defer.onFailure(() => xapi.deleteVm(newVM, true))
   const addressAndHost = await _prepareGlusterVm(xapi, srObject, newVM, xosanNetwork, ipAddress, {
     labelSuffix,
     increaseDataDisk,
@@ -836,7 +836,7 @@ const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xos
   return {data, newVM, addressAndHost, glusterEndpoint}
 })

-export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, lvmsrs, brickSize}) {
+export const addBricks = defer(async function ($defer, {xosansr, lvmsrs, brickSize}) {
   const OPERATION_OBJECT = {
     operation: 'addBricks',
     states: ['insertingNewVms', 'addingBricks', 'scaningSr'],
@@ -857,8 +857,8 @@ export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, l
     const ipAddress = _findIPAddressOutsideList(usedAddresses.concat(newAddresses), data.networkPrefix)
     newAddresses.push(ipAddress)
     const {newVM, addressAndHost} = await this::insertNewGlusterVm(xapi, xosansr, newSr, {ipAddress, brickSize})
-    $onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + ipAddress, true))
-    $onFailure(() => xapi.deleteVm(newVM, true))
+    $defer.onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + ipAddress, true))
+    $defer.onFailure(() => xapi.deleteVm(newVM, true))
     const brickName = addressAndHost.brickName
     newNodes.push({brickName, host: addressAndHost.host.$id, vm: {id: newVM.$id, ip: ipAddress}, underlyingSr: newSr})
   }
@@ -901,7 +901,7 @@ addBricks.resolve = {
   lvmsrs: ['sr', 'SR', 'administrate'],
 }

-export const removeBricks = defer.onFailure(async function ($onFailure, {xosansr, bricks}) {
+export const removeBricks = defer(async function ($defer, {xosansr, bricks}) {
   const xapi = this.getXapi(xosansr)
   if (CURRENT_POOL_OPERATIONS[xapi.pool.$id]) {
     throw new Error('this there is already a XOSAN operation running on this pool')

View File

@@ -792,8 +792,8 @@ export default class Xapi extends XapiBase {
   // Create a snapshot of the VM and returns a delta export object.
   @cancellable
-  @deferrable.onFailure
-  async exportDeltaVm ($onFailure, $cancelToken, vmId, baseVmId = undefined, {
+  @deferrable
+  async exportDeltaVm ($defer, $cancelToken, vmId, baseVmId = undefined, {
     bypassVdiChainsCheck = false,
     // Contains a vdi.$id set of vmId.
@@ -807,7 +807,7 @@
     }
     const vm = await this.snapshotVm(vmId)
-    $onFailure(() => this._deleteVm(vm))
+    $defer.onFailure(() => this._deleteVm(vm))
     if (snapshotNameLabel) {
       this._setObjectProperties(vm, {
         nameLabel: snapshotNameLabel,
@@ -878,7 +878,7 @@
         $SR$uuid: vdi.$SR.uuid,
       }
       const stream = streams[`${vdiRef}.vhd`] = this._exportVdi($cancelToken, vdi, baseVdi, VDI_FORMAT_VHD)
-      $onFailure(stream.cancel)
+      $defer.onFailure(stream.cancel)
     })
     const vifs = {}
@@ -908,8 +908,8 @@
     })
   }

-  @deferrable.onFailure
-  async importDeltaVm ($onFailure, delta, {
+  @deferrable
+  async importDeltaVm ($defer, delta, {
     deleteBase = false,
     disableStartAfterImport = true,
     mapVdisSrs = {},
@@ -950,7 +950,7 @@
         is_a_template: false,
       })
     )
-    $onFailure(() => this._deleteVm(vm))
+    $defer.onFailure(() => this._deleteVm(vm))
     await Promise.all([
       this._setObjectProperties(vm, {
@@ -983,7 +983,7 @@
           },
           sr: mapVdisSrs[vdi.uuid] || srId,
         })
-        $onFailure(() => this._deleteVdi(newVdi))
+        $defer.onFailure(() => this._deleteVdi(newVdi))
         return newVdi
       }
@@ -999,7 +999,7 @@
       const newVdi = await this._getOrWaitObject(
         await this._cloneVdi(baseVdi)
       )
-      $onFailure(() => this._deleteVdi(newVdi))
+      $defer.onFailure(() => this._deleteVdi(newVdi))
       await this._updateObjectMapProperty(newVdi, 'other_config', {
         [TAG_COPY_SRC]: vdi.uuid,
@@ -1238,8 +1238,8 @@
     return vmRef
   }

-  @deferrable.onFailure
-  async _importOvaVm ($onFailure, stream, {
+  @deferrable
+  async _importOvaVm ($defer, stream, {
     descriptionLabel,
     disks,
     memory,
@@ -1260,7 +1260,7 @@
         VCPUs_max: nCpus,
       })
     )
-    $onFailure(() => this._deleteVm(vm))
+    $defer.onFailure(() => this._deleteVm(vm))
     // Disable start and change the VM name label during import.
     await Promise.all([
       this.addForbiddenOperationToVm(vm.$id, 'start', 'OVA import in progress...'),
@@ -1277,7 +1277,7 @@
         name_label: disk.nameLabel,
         sr: sr.$ref,
       })
-      $onFailure(() => this._deleteVdi(vdi))
+      $defer.onFailure(() => this._deleteVdi(vdi))
       return this._createVbd(vm, vdi, { position: disk.position })
     }).concat(map(networks, (networkId, i) => (
@@ -1959,8 +1959,8 @@
       )
     )
   }

-  @deferrable.onFailure
-  async createNetwork ($onFailure, {
+  @deferrable
+  async createNetwork ($defer, {
     name,
     description = 'Created with Xen Orchestra',
     pifId,
@@ -1973,7 +1973,7 @@
       MTU: asInteger(mtu),
       other_config: {},
     })
-    $onFailure(() => this.call('network.destroy', networkRef))
+    $defer.onFailure(() => this.call('network.destroy', networkRef))
     if (pifId) {
       await this.call('pool.create_VLAN_from_PIF', this.getObject(pifId).$ref, networkRef, asInteger(vlan))
     }
@@ -2017,15 +2017,15 @@
     )
   }

-  @deferrable.onFailure
-  async createBondedNetwork ($onFailure, {
+  @deferrable
+  async createBondedNetwork ($defer, {
     bondMode,
     mac = '',
     pifIds,
     ...params
   }) {
     const network = await this.createNetwork(params)
-    $onFailure(() => this.deleteNetwork(network))
+    $defer.onFailure(() => this.deleteNetwork(network))
     // TODO: test and confirm:
     // Bond.create is called here with PIFs from one host but XAPI should then replicate the
     // bond on each host in the same pool with the corresponding PIFs (ie same interface names?).
@@ -2116,15 +2116,15 @@
   }

   // Generic Config Drive
-  @deferrable.onFailure
-  async createCloudInitConfigDrive ($onFailure, vmId, srId, config) {
+  @deferrable
+  async createCloudInitConfigDrive ($defer, vmId, srId, config) {
     const vm = this.getObject(vmId)
     const sr = this.getObject(srId)
     // First, create a small VDI (10MB) which will become the ConfigDrive
     const buffer = fatfsBufferInit()
     const vdi = await this.createVdi(buffer.length, { name_label: 'XO CloudConfigDrive', sr: sr.$ref })
-    $onFailure(() => this._deleteVdi(vdi))
+    $defer.onFailure(() => this._deleteVdi(vdi))
     // Then, generate a FAT fs
     const fs = promisifyAll(fatfs.createFileSystem(fatfsBuffer(buffer)))
@@ -2146,14 +2146,14 @@
     await this._createVbd(vm, vdi)
   }

-  @deferrable.onFailure
-  async createTemporaryVdiOnSr ($onFailure, stream, sr, name_label, name_description) {
+  @deferrable
+  async createTemporaryVdiOnSr ($defer, stream, sr, name_label, name_description) {
     const vdi = await this.createVdi(stream.length, {
       sr: sr.$ref,
       name_label,
       name_description,
     })
-    $onFailure(() => this._deleteVdi(vdi))
+    $defer.onFailure(() => this._deleteVdi(vdi))
     await this.importVdiContent(vdi.$id, stream, { format: VDI_FORMAT_RAW })
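
The decorator form above follows the same pattern as the wrapped functions in the previous file: `@deferrable.onFailure` becomes plain `@deferrable`, and the method now receives the injected `$defer` object (instead of the `$onFailure` callback) ahead of its real parameters. A minimal sketch, assuming the legacy Babel decorator transform this codebase already uses and hypothetical `create*`/`destroy*` helpers:

import deferrable from 'golike-defer'

class Example {
  // Hypothetical resource helpers, for illustration only.
  async createThing (name) { return { name } }
  async configureThing (thing) { /* may throw */ }
  async destroyThing (thing) { /* cleanup */ }

  @deferrable
  async provisionThing ($defer, name) {
    const thing = await this.createThing(name)
    // Registered cleanups run only if the rest of the method throws.
    $defer.onFailure(() => this.destroyThing(thing))
    await this.configureThing(thing)
    return thing
  }
}

Callers still invoke `provisionThing(name)`; the decorator supplies `$defer`.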

View File

@@ -27,8 +27,8 @@ const XEN_VIDEORAM_VALUES = [1, 2, 4, 8, 16]
 export default {
   // TODO: clean up on error.
-  @deferrable.onFailure
-  async createVm ($onFailure, templateId, {
+  @deferrable
+  async createVm ($defer, templateId, {
     name_label, // eslint-disable-line camelcase
     nameLabel = name_label, // eslint-disable-line camelcase
@@ -62,7 +62,7 @@ export default {
     // Clones the template.
     const vmRef = await this[clone ? '_cloneVm' : '_copyVm'](template, nameLabel)
-    $onFailure(() => this.deleteVm(vmRef))
+    $defer.onFailure(() => this.deleteVm(vmRef))
     // TODO: copy BIOS strings?

View File

@@ -399,8 +399,8 @@ export default class {
   // -----------------------------------------------------------------

-  @deferrable.onFailure
-  async deltaCopyVm ($onFailure, srcVm, targetSr, force = false, retention = 1) {
+  @deferrable
+  async deltaCopyVm ($defer, srcVm, targetSr, force = false, retention = 1) {
     const transferStart = Date.now()
     const srcXapi = this._xo.getXapi(srcVm)
     const targetXapi = this._xo.getXapi(targetSr)
@@ -426,8 +426,8 @@
       bypassVdiChainsCheck: force,
       snapshotNameLabel: `XO_DELTA_EXPORT: ${targetSr.name_label} (${targetSr.uuid})`,
     })
-    $onFailure(() => srcXapi.deleteVm(delta.vm.uuid))
-    $onFailure(cancel)
+    $defer.onFailure(() => srcXapi.deleteVm(delta.vm.uuid))
+    $defer.onFailure(cancel)
     const date = safeDateFormat(Date.now())
     delta.vm.name_label += ` (${date})`
@@ -774,8 +774,8 @@
     }
   }

-  @deferrable.onFailure
-  async rollingDeltaVmBackup ($onFailure, {vm, remoteId, tag, retention}) {
+  @deferrable
+  async rollingDeltaVmBackup ($defer, {vm, remoteId, tag, retention}) {
     const transferStart = Date.now()
     const handler = await this._xo.getRemoteHandler(remoteId)
     const xapi = this._xo.getXapi(vm)
@@ -817,8 +817,8 @@
       fullVdisRequired,
       disableBaseTags: true,
     })
-    $onFailure(() => xapi.deleteVm(delta.vm.uuid))
-    $onFailure(cancel)
+    $defer.onFailure(() => xapi.deleteVm(delta.vm.uuid))
+    $defer.onFailure(cancel)
     // Save vdis.
     const vdiBackups = await pSettle(
@@ -857,7 +857,7 @@
       }
     }
-    $onFailure(() => asyncMap(fulFilledVdiBackups, vdiBackup =>
+    $defer.onFailure(() => asyncMap(fulFilledVdiBackups, vdiBackup =>
       handler.unlink(`${dir}/${vdiBackup.value()}`)::ignoreErrors()
     ))
@@ -869,7 +869,7 @@
     const backupFormat = `${date}_${vm.name_label}`
     const infoPath = `${dir}/${backupFormat}${DELTA_BACKUP_EXT}`
-    $onFailure(() => handler.unlink(infoPath))
+    $defer.onFailure(() => handler.unlink(infoPath))
     // Write Metadata.
     await handler.outputFile(infoPath, JSON.stringify(delta, null, 2))
@@ -972,11 +972,11 @@
     return this._backupVm(vm, handler, file, {compress, onlyMetadata})
   }

-  @deferrable.onFailure
-  async _backupVm ($onFailure, vm, handler, file, {compress, onlyMetadata}) {
+  @deferrable
+  async _backupVm ($defer, vm, handler, file, {compress, onlyMetadata}) {
     const targetStream = await handler.createOutputStream(file)
-    $onFailure.call(handler, 'unlink', file)
-    $onFailure.call(targetStream, 'close')
+    $defer.onFailure.call(handler, 'unlink', file)
+    $defer.onFailure.call(targetStream, 'close')
     const promise = eventToPromise(targetStream, 'finish')
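
`_backupVm` above also uses the method-call registration form, which survives the rename as `$defer.onFailure.call(thisArg, 'method', ...args)`. Judging only from its use in this diff, it appears equivalent to deferring `thisArg.method(...args)` without writing a closure. A rough sketch with a hypothetical in-memory handler (the handler object and validate step are illustrative, not from this repo):

import defer from 'golike-defer'

// Hypothetical stand-in for the remote handler used above.
const handler = {
  files: new Map(),
  async outputFile (path, data) { this.files.set(path, data) },
  async unlink (path) { this.files.delete(path) },
}

// Hypothetical later step that may reject.
const validate = async data => { if (data == null) throw new Error('empty backup') }

const writeMetadata = defer(async function ($defer, path, data) {
  await handler.outputFile(path, data)
  // Method-call form: on failure, invoke handler.unlink(path)...
  $defer.onFailure.call(handler, 'unlink', path)
  // ...which appears equivalent to the closure form used elsewhere in this commit:
  //   $defer.onFailure(() => handler.unlink(path))
  await validate(data) // any throw from here on triggers the deferred unlink
})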

View File

@@ -3069,9 +3069,9 @@ glogg@^1.0.0:
   dependencies:
     sparkles "^1.0.0"

-golike-defer@^0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.3.0.tgz#3043bf000e545ea8b4bddbda62f6198104c1a08a"
+golike-defer@^0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.4.1.tgz#7a1cd435d61e461305805d980b133a0f3db4e1cc"

 graceful-fs@4.X, graceful-fs@^4.0.0, graceful-fs@^4.1.0, graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.6:
   version "4.1.11"