chore(package): upgrade golike-defer to 0.4.1 (#632)
parent 3ef08d3d5e
commit a6869638f5
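
Every hunk below applies the same mechanical migration: golike-defer 0.4 drops the defer.onFailure wrapper, which injected a bare $onFailure callback, in favour of a single defer wrapper (used as the @deferrable decorator in the class files) that injects a $defer object whose onFailure method registers failure-only cleanups. A minimal before/after sketch of the pattern; api.allocate/api.release are hypothetical stand-ins, only the golike-defer calls mirror this diff:

import defer from 'golike-defer'

// With golike-defer 0.3.x: the wrapper itself picks the failure-only
// mode and injects a bare $onFailure callback.
const createOld = defer.onFailure(async function ($onFailure, api) {
  const res = await api.allocate()     // hypothetical resource API
  $onFailure(() => api.release(res))   // runs only if the function throws
  return res
})

// With golike-defer 0.4.x: one defer wrapper injects a $defer object;
// failure-only cleanups move to its onFailure method.
const createNew = defer(async function ($defer, api) {
  const res = await api.allocate()
  $defer.onFailure(() => api.release(res))
  return res
})

Presumably $defer itself also remains callable for cleanups that must run on success and failure alike, which is why a single wrapper can now cover both modes.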
@@ -62,7 +62,7 @@
     "fatfs": "^0.10.4",
     "from2": "^2.3.0",
     "fs-extra": "^5.0.0",
-    "golike-defer": "^0.3.0",
+    "golike-defer": "^0.4.1",
     "hashy": "^0.6.2",
     "helmet": "^3.9.0",
     "highland": "^2.11.1",
@@ -308,7 +308,7 @@ async function glusterCmd (glusterEndpoint, cmd, ignoreError = false) {
   return result
 }
 
-const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure, xapi, pif, vlan, networkPrefix) {
+const createNetworkAndInsertHosts = defer(async function ($defer, xapi, pif, vlan, networkPrefix) {
   let hostIpLastNumber = HOST_FIRST_NUMBER
   const xosanNetwork = await xapi.createNetwork({
     name: 'XOSAN network',
@@ -317,7 +317,7 @@ const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure,
     mtu: pif.mtu,
     vlan: +vlan,
   })
-  $onFailure(() => xapi.deleteNetwork(xosanNetwork))
+  $defer.onFailure(() => xapi.deleteNetwork(xosanNetwork))
   const addresses = xosanNetwork.$PIFs.map(pif => ({pif, address: networkPrefix + (hostIpLastNumber++)}))
   await asyncMap(addresses, addressAndPif => reconfigurePifIP(xapi, addressAndPif.pif, addressAndPif.address))
   const master = xapi.pool.$master
@@ -354,10 +354,10 @@ async function getOrCreateSshKey (xapi) {
   return sshKey
 }
 
-const _probePoolAndWaitForPresence = defer.onFailure(async function ($onFailure, glusterEndpoint, addresses) {
+const _probePoolAndWaitForPresence = defer(async function ($defer, glusterEndpoint, addresses) {
   await asyncMap(addresses, async (address) => {
     await glusterCmd(glusterEndpoint, 'peer probe ' + address)
-    $onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + address, true))
+    $defer.onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + address, true))
   })
 
   function shouldRetry (peers) {
@@ -416,7 +416,7 @@ async function configureGluster (redundancy, ipAndHosts, glusterEndpoint, gluste
   await glusterCmd(glusterEndpoint, 'volume start xosan')
 }
 
-export const createSR = defer.onFailure(async function ($onFailure, {
+export const createSR = defer(async function ($defer, {
   template, pif, vlan, srs, glusterType,
   redundancy, brickSize = this::computeBrickSize(srs), memorySize = 2 * GIGABYTE, ipRange = DEFAULT_NETWORK_PREFIX + '.0',
 }) {
@@ -444,7 +444,7 @@ export const createSR = defer.onFailure(async function ($onFailure, {
   CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
   try {
     const xosanNetwork = await createNetworkAndInsertHosts(xapi, pif, vlan, networkPrefix)
-    $onFailure(() => xapi.deleteNetwork(xosanNetwork))
+    $defer.onFailure(() => xapi.deleteNetwork(xosanNetwork))
     const sshKey = await getOrCreateSshKey(xapi)
     const srsObjects = map(srs, srId => xapi.getObject(srId))
     await Promise.all(srsObjects.map(sr => callPlugin(xapi, sr.$PBDs[0].$host, 'receive_ssh_keys', {
@@ -456,11 +456,11 @@ export const createSR = defer.onFailure(async function ($onFailure, {
     const firstSr = srsObjects[0]
     CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
     const firstVM = await this::_importGlusterVM(xapi, template, firstSr)
-    $onFailure(() => xapi.deleteVm(firstVM, true))
+    $defer.onFailure(() => xapi.deleteVm(firstVM, true))
     CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
     const copiedVms = await asyncMap(srsObjects.slice(1), sr =>
       copyVm(xapi, firstVM, sr)::tap(({vm}) =>
-        $onFailure(() => xapi.deleteVm(vm))
+        $defer.onFailure(() => xapi.deleteVm(vm))
       )
     )
     const vmsAndSrs = [{
@@ -473,7 +473,7 @@ export const createSR = defer.onFailure(async function ($onFailure, {
      const sr = firstSr
      const arbiterIP = networkPrefix + (vmIpLastNumber++)
      const arbiterVm = await xapi.copyVm(firstVM, sr)
-      $onFailure(() => xapi.deleteVm(arbiterVm, true))
+      $defer.onFailure(() => xapi.deleteVm(arbiterVm, true))
      arbiter = await _prepareGlusterVm(xapi, sr, arbiterVm, xosanNetwork, arbiterIP, {
        labelSuffix: '_arbiter',
        increaseDataDisk: false,
@@ -498,7 +498,7 @@ export const createSR = defer.onFailure(async function ($onFailure, {
      'xosan', '', true, {})
    debug('sr created')
    // we just forget because the cleanup actions are stacked in the $onFailure system
-    $onFailure(() => xapi.forgetSr(xosanSrRef))
+    $defer.onFailure(() => xapi.forgetSr(xosanSrRef))
    if (arbiter) {
      ipAndHosts.push(arbiter)
    }
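
The context comment above ("the cleanup actions are stacked in the $onFailure system") is why this pattern is safe to sprinkle through a long provisioning function: like Go's defer, the registered cleanups presumably unwind in reverse registration order once the wrapped function throws. A runnable sketch, with stub functions standing in for the xapi calls:

import defer from 'golike-defer'

// Illustrative stubs standing in for xapi.createNetwork / xapi.deleteNetwork.
const createNet = async () => ({ id: 'net0' })
const destroyNet = async net => console.log('cleaned up', net.id)

const provision = defer(async function ($defer) {
  const net = await createNet()
  $defer.onFailure(() => destroyNet(net)) // registered, not yet run
  throw new Error('boom')                 // the throw unwinds the stack
})

provision().catch(() => { /* 'cleaned up net0' has been logged */ })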
@@ -802,7 +802,7 @@ const _median = arr => {
   return arr[Math.floor(arr.length / 2)]
 }
 
-const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xosansr, lvmsrId, {
+const insertNewGlusterVm = defer(async function ($defer, xapi, xosansr, lvmsrId, {
   labelSuffix = '',
   glusterEndpoint = null, ipAddress = null, increaseDataDisk = true, brickSize = Infinity,
 }) {
@@ -822,7 +822,7 @@ const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xos
   const srObject = xapi.getObject(lvmsrId)
   // can't really copy an existing VM, because existing gluster VMs disks might too large to be copied.
   const newVM = await this::_importGlusterVM(xapi, data.template, lvmsrId)
-  $onFailure(() => xapi.deleteVm(newVM, true))
+  $defer.onFailure(() => xapi.deleteVm(newVM, true))
   const addressAndHost = await _prepareGlusterVm(xapi, srObject, newVM, xosanNetwork, ipAddress, {
     labelSuffix,
     increaseDataDisk,
@@ -836,7 +836,7 @@ const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xos
   return {data, newVM, addressAndHost, glusterEndpoint}
 })
 
-export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, lvmsrs, brickSize}) {
+export const addBricks = defer(async function ($defer, {xosansr, lvmsrs, brickSize}) {
   const OPERATION_OBJECT = {
     operation: 'addBricks',
     states: ['insertingNewVms', 'addingBricks', 'scaningSr'],
@@ -857,8 +857,8 @@ export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, l
     const ipAddress = _findIPAddressOutsideList(usedAddresses.concat(newAddresses), data.networkPrefix)
     newAddresses.push(ipAddress)
     const {newVM, addressAndHost} = await this::insertNewGlusterVm(xapi, xosansr, newSr, {ipAddress, brickSize})
-    $onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + ipAddress, true))
-    $onFailure(() => xapi.deleteVm(newVM, true))
+    $defer.onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + ipAddress, true))
+    $defer.onFailure(() => xapi.deleteVm(newVM, true))
     const brickName = addressAndHost.brickName
     newNodes.push({brickName, host: addressAndHost.host.$id, vm: {id: newVM.$id, ip: ipAddress}, underlyingSr: newSr})
   }
@@ -901,7 +901,7 @@ addBricks.resolve = {
   lvmsrs: ['sr', 'SR', 'administrate'],
 }
 
-export const removeBricks = defer.onFailure(async function ($onFailure, {xosansr, bricks}) {
+export const removeBricks = defer(async function ($defer, {xosansr, bricks}) {
   const xapi = this.getXapi(xosansr)
   if (CURRENT_POOL_OPERATIONS[xapi.pool.$id]) {
     throw new Error('this there is already a XOSAN operation running on this pool')
@@ -792,8 +792,8 @@ export default class Xapi extends XapiBase {
 
   // Create a snapshot of the VM and returns a delta export object.
   @cancellable
-  @deferrable.onFailure
-  async exportDeltaVm ($onFailure, $cancelToken, vmId, baseVmId = undefined, {
+  @deferrable
+  async exportDeltaVm ($defer, $cancelToken, vmId, baseVmId = undefined, {
     bypassVdiChainsCheck = false,
 
     // Contains a vdi.$id set of vmId.
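
exportDeltaVm stacks @cancellable (presumably promise-toolbox's) on top of @deferrable, and the new signature shows how the injected arguments compose. The trace below is an inference from that signature, not something the diff states:

// Decorators apply bottom-up, each wrapper prepending its own argument:
//   caller:       exportDeltaVm(vmId, baseVmId, opts)
//   @cancellable: inner($cancelToken, vmId, baseVmId, opts)
//   @deferrable:  fn($defer, $cancelToken, vmId, baseVmId, opts)
// hence the declared order ($defer, $cancelToken, vmId, ...).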
@@ -807,7 +807,7 @@ export default class Xapi extends XapiBase {
    }
 
    const vm = await this.snapshotVm(vmId)
-    $onFailure(() => this._deleteVm(vm))
+    $defer.onFailure(() => this._deleteVm(vm))
    if (snapshotNameLabel) {
      this._setObjectProperties(vm, {
        nameLabel: snapshotNameLabel,
@@ -878,7 +878,7 @@ export default class Xapi extends XapiBase {
        $SR$uuid: vdi.$SR.uuid,
      }
      const stream = streams[`${vdiRef}.vhd`] = this._exportVdi($cancelToken, vdi, baseVdi, VDI_FORMAT_VHD)
-      $onFailure(stream.cancel)
+      $defer.onFailure(stream.cancel)
    })
 
    const vifs = {}
@@ -908,8 +908,8 @@ export default class Xapi extends XapiBase {
    })
  }
 
-  @deferrable.onFailure
-  async importDeltaVm ($onFailure, delta, {
+  @deferrable
+  async importDeltaVm ($defer, delta, {
    deleteBase = false,
    disableStartAfterImport = true,
    mapVdisSrs = {},
@@ -950,7 +950,7 @@ export default class Xapi extends XapiBase {
        is_a_template: false,
      })
    )
-    $onFailure(() => this._deleteVm(vm))
+    $defer.onFailure(() => this._deleteVm(vm))
 
    await Promise.all([
      this._setObjectProperties(vm, {
@@ -983,7 +983,7 @@ export default class Xapi extends XapiBase {
        },
        sr: mapVdisSrs[vdi.uuid] || srId,
      })
-      $onFailure(() => this._deleteVdi(newVdi))
+      $defer.onFailure(() => this._deleteVdi(newVdi))
 
      return newVdi
    }
@@ -999,7 +999,7 @@ export default class Xapi extends XapiBase {
      const newVdi = await this._getOrWaitObject(
        await this._cloneVdi(baseVdi)
      )
-      $onFailure(() => this._deleteVdi(newVdi))
+      $defer.onFailure(() => this._deleteVdi(newVdi))
 
      await this._updateObjectMapProperty(newVdi, 'other_config', {
        [TAG_COPY_SRC]: vdi.uuid,
@@ -1238,8 +1238,8 @@ export default class Xapi extends XapiBase {
    return vmRef
  }
 
-  @deferrable.onFailure
-  async _importOvaVm ($onFailure, stream, {
+  @deferrable
+  async _importOvaVm ($defer, stream, {
    descriptionLabel,
    disks,
    memory,
@@ -1260,7 +1260,7 @@ export default class Xapi extends XapiBase {
        VCPUs_max: nCpus,
      })
    )
-    $onFailure(() => this._deleteVm(vm))
+    $defer.onFailure(() => this._deleteVm(vm))
    // Disable start and change the VM name label during import.
    await Promise.all([
      this.addForbiddenOperationToVm(vm.$id, 'start', 'OVA import in progress...'),
@@ -1277,7 +1277,7 @@ export default class Xapi extends XapiBase {
        name_label: disk.nameLabel,
        sr: sr.$ref,
      })
-      $onFailure(() => this._deleteVdi(vdi))
+      $defer.onFailure(() => this._deleteVdi(vdi))
 
      return this._createVbd(vm, vdi, { position: disk.position })
    }).concat(map(networks, (networkId, i) => (
@@ -1959,8 +1959,8 @@ export default class Xapi extends XapiBase {
      )
    )
  }
-  @deferrable.onFailure
-  async createNetwork ($onFailure, {
+  @deferrable
+  async createNetwork ($defer, {
    name,
    description = 'Created with Xen Orchestra',
    pifId,
@@ -1973,7 +1973,7 @@ export default class Xapi extends XapiBase {
      MTU: asInteger(mtu),
      other_config: {},
    })
-    $onFailure(() => this.call('network.destroy', networkRef))
+    $defer.onFailure(() => this.call('network.destroy', networkRef))
    if (pifId) {
      await this.call('pool.create_VLAN_from_PIF', this.getObject(pifId).$ref, networkRef, asInteger(vlan))
    }
@@ -2017,15 +2017,15 @@ export default class Xapi extends XapiBase {
    )
  }
 
-  @deferrable.onFailure
-  async createBondedNetwork ($onFailure, {
+  @deferrable
+  async createBondedNetwork ($defer, {
    bondMode,
    mac = '',
    pifIds,
    ...params
  }) {
    const network = await this.createNetwork(params)
-    $onFailure(() => this.deleteNetwork(network))
+    $defer.onFailure(() => this.deleteNetwork(network))
    // TODO: test and confirm:
    // Bond.create is called here with PIFs from one host but XAPI should then replicate the
    // bond on each host in the same pool with the corresponding PIFs (ie same interface names?).
@@ -2116,15 +2116,15 @@ export default class Xapi extends XapiBase {
  }
 
  // Generic Config Drive
-  @deferrable.onFailure
-  async createCloudInitConfigDrive ($onFailure, vmId, srId, config) {
+  @deferrable
+  async createCloudInitConfigDrive ($defer, vmId, srId, config) {
    const vm = this.getObject(vmId)
    const sr = this.getObject(srId)
 
    // First, create a small VDI (10MB) which will become the ConfigDrive
    const buffer = fatfsBufferInit()
    const vdi = await this.createVdi(buffer.length, { name_label: 'XO CloudConfigDrive', sr: sr.$ref })
-    $onFailure(() => this._deleteVdi(vdi))
+    $defer.onFailure(() => this._deleteVdi(vdi))
 
    // Then, generate a FAT fs
    const fs = promisifyAll(fatfs.createFileSystem(fatfsBuffer(buffer)))
@@ -2146,14 +2146,14 @@ export default class Xapi extends XapiBase {
    await this._createVbd(vm, vdi)
  }
 
-  @deferrable.onFailure
-  async createTemporaryVdiOnSr ($onFailure, stream, sr, name_label, name_description) {
+  @deferrable
+  async createTemporaryVdiOnSr ($defer, stream, sr, name_label, name_description) {
    const vdi = await this.createVdi(stream.length, {
      sr: sr.$ref,
      name_label,
      name_description,
    })
-    $onFailure(() => this._deleteVdi(vdi))
+    $defer.onFailure(() => this._deleteVdi(vdi))
 
    await this.importVdiContent(vdi.$id, stream, { format: VDI_FORMAT_RAW })
 
@@ -27,8 +27,8 @@ const XEN_VIDEORAM_VALUES = [1, 2, 4, 8, 16]
 
 export default {
  // TODO: clean up on error.
-  @deferrable.onFailure
-  async createVm ($onFailure, templateId, {
+  @deferrable
+  async createVm ($defer, templateId, {
    name_label, // eslint-disable-line camelcase
    nameLabel = name_label, // eslint-disable-line camelcase
 
@@ -62,7 +62,7 @@ export default {
 
    // Clones the template.
    const vmRef = await this[clone ? '_cloneVm' : '_copyVm'](template, nameLabel)
-    $onFailure(() => this.deleteVm(vmRef))
+    $defer.onFailure(() => this.deleteVm(vmRef))
 
    // TODO: copy BIOS strings?
 
@@ -399,8 +399,8 @@ export default class {
 
  // -----------------------------------------------------------------
 
-  @deferrable.onFailure
-  async deltaCopyVm ($onFailure, srcVm, targetSr, force = false, retention = 1) {
+  @deferrable
+  async deltaCopyVm ($defer, srcVm, targetSr, force = false, retention = 1) {
    const transferStart = Date.now()
    const srcXapi = this._xo.getXapi(srcVm)
    const targetXapi = this._xo.getXapi(targetSr)
@@ -426,8 +426,8 @@ export default class {
      bypassVdiChainsCheck: force,
      snapshotNameLabel: `XO_DELTA_EXPORT: ${targetSr.name_label} (${targetSr.uuid})`,
    })
-    $onFailure(() => srcXapi.deleteVm(delta.vm.uuid))
-    $onFailure(cancel)
+    $defer.onFailure(() => srcXapi.deleteVm(delta.vm.uuid))
+    $defer.onFailure(cancel)
 
    const date = safeDateFormat(Date.now())
    delta.vm.name_label += ` (${date})`
@@ -774,8 +774,8 @@ export default class {
    }
  }
 
-  @deferrable.onFailure
-  async rollingDeltaVmBackup ($onFailure, {vm, remoteId, tag, retention}) {
+  @deferrable
+  async rollingDeltaVmBackup ($defer, {vm, remoteId, tag, retention}) {
    const transferStart = Date.now()
    const handler = await this._xo.getRemoteHandler(remoteId)
    const xapi = this._xo.getXapi(vm)
@@ -817,8 +817,8 @@ export default class {
      fullVdisRequired,
      disableBaseTags: true,
    })
-    $onFailure(() => xapi.deleteVm(delta.vm.uuid))
-    $onFailure(cancel)
+    $defer.onFailure(() => xapi.deleteVm(delta.vm.uuid))
+    $defer.onFailure(cancel)
 
    // Save vdis.
    const vdiBackups = await pSettle(
@@ -857,7 +857,7 @@ export default class {
      }
    }
 
-    $onFailure(() => asyncMap(fulFilledVdiBackups, vdiBackup =>
+    $defer.onFailure(() => asyncMap(fulFilledVdiBackups, vdiBackup =>
      handler.unlink(`${dir}/${vdiBackup.value()}`)::ignoreErrors()
    ))
 
@@ -869,7 +869,7 @@ export default class {
    const backupFormat = `${date}_${vm.name_label}`
    const infoPath = `${dir}/${backupFormat}${DELTA_BACKUP_EXT}`
 
-    $onFailure(() => handler.unlink(infoPath))
+    $defer.onFailure(() => handler.unlink(infoPath))
 
    // Write Metadata.
    await handler.outputFile(infoPath, JSON.stringify(delta, null, 2))
@@ -972,11 +972,11 @@ export default class {
    return this._backupVm(vm, handler, file, {compress, onlyMetadata})
  }
 
-  @deferrable.onFailure
-  async _backupVm ($onFailure, vm, handler, file, {compress, onlyMetadata}) {
+  @deferrable
+  async _backupVm ($defer, vm, handler, file, {compress, onlyMetadata}) {
    const targetStream = await handler.createOutputStream(file)
-    $onFailure.call(handler, 'unlink', file)
-    $onFailure.call(targetStream, 'close')
+    $defer.onFailure.call(handler, 'unlink', file)
+    $defer.onFailure.call(targetStream, 'close')
 
    const promise = eventToPromise(targetStream, 'finish')
 
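
_backupVm uses a second registration form: $defer.onFailure.call(thisArg, methodName, ...args), which schedules thisArg[methodName](...args) without an arrow-function wrapper. The two lines below should queue the same cleanup; the closure form is a paraphrase, not taken from the diff:

$defer.onFailure.call(handler, 'unlink', file) // method name + args
$defer.onFailure(() => handler.unlink(file))   // equivalent closure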
@@ -3069,9 +3069,9 @@ glogg@^1.0.0:
   dependencies:
     sparkles "^1.0.0"
 
-golike-defer@^0.3.0:
-  version "0.3.0"
-  resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.3.0.tgz#3043bf000e545ea8b4bddbda62f6198104c1a08a"
+golike-defer@^0.4.1:
+  version "0.4.1"
+  resolved "https://registry.yarnpkg.com/golike-defer/-/golike-defer-0.4.1.tgz#7a1cd435d61e461305805d980b133a0f3db4e1cc"
 
 graceful-fs@4.X, graceful-fs@^4.0.0, graceful-fs@^4.1.0, graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.6:
   version "4.1.11"