feat(backups): update VM backups cache (#6411)
commit 191c124130 (parent 2742f948c2)
@xen-orchestra/backups/RemoteAdapter.js

```diff
@@ -22,6 +22,7 @@ const zlib = require('zlib')
+const { BACKUP_DIR } = require('./_getVmBackupDir.js')
 const { cleanVm } = require('./_cleanVm.js')
 const { formatFilenameDate } = require('./_filenameDate.js')
 const { getTmpDir } = require('./_getTmpDir.js')
 const { isMetadataFile } = require('./_backupType.js')
 const { isValidXva } = require('./_isValidXva.js')
@@ -34,7 +35,7 @@ exports.DIR_XO_CONFIG_BACKUPS = DIR_XO_CONFIG_BACKUPS
 const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
 exports.DIR_XO_POOL_METADATA_BACKUPS = DIR_XO_POOL_METADATA_BACKUPS

-const { warn } = createLogger('xo:backups:RemoteAdapter')
+const { debug, warn } = createLogger('xo:backups:RemoteAdapter')

 const compareTimestamp = (a, b) => a.timestamp - b.timestamp
```
```diff
@@ -224,11 +225,30 @@ class RemoteAdapter {
     return promise
   }

+  #removeVmBackupsFromCache(backups) {
+    for (const [dir, filenames] of Object.entries(
+      groupBy(
+        backups.map(_ => _._filename),
+        dirname
+      )
+    )) {
+      // detached async action, will not reject
+      this._updateCache(dir + '/cache.json.gz', backups => {
+        for (const filename of filenames) {
+          debug('removing cache entry', { entry: filename })
+          delete backups[filename]
+        }
+      })
+    }
+  }
+
   async deleteDeltaVmBackups(backups) {
     const handler = this._handler

     // this will delete the json, unused VHDs will be detected by `cleanVm`
     await asyncMapSettled(backups, ({ _filename }) => handler.unlink(_filename))
+
+    this.#removeVmBackupsFromCache(backups)
   }

   async deleteMetadataBackup(backupId) {
```
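For readers unfamiliar with the helpers: `groupBy` and `dirname` here are presumably lodash's `groupBy` and Node's `path.dirname`, so a deletion touching several VMs issues exactly one `_updateCache` call per VM directory. A minimal sketch of that grouping step, with illustrative paths:

```js
// Sketch of the grouping step; paths are illustrative, not real data.
const groupBy = require('lodash/groupBy')
const { dirname } = require('path')

const filenames = [
  '/xo-vm-backups/vm-1/20220801T000000Z.json',
  '/xo-vm-backups/vm-1/20220802T000000Z.json',
  '/xo-vm-backups/vm-2/20220801T000000Z.json',
]

// one bucket per directory => one cache.json.gz update per VM
console.log(groupBy(filenames, dirname))
// {
//   '/xo-vm-backups/vm-1': [ '...20220801...json', '...20220802...json' ],
//   '/xo-vm-backups/vm-2': [ '...20220801...json' ]
// }
```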
```diff
@@ -256,6 +276,8 @@ class RemoteAdapter {
     await asyncMapSettled(backups, ({ _filename, xva }) =>
       Promise.all([handler.unlink(_filename), handler.unlink(resolveRelativeFromFile(_filename, xva))])
     )
+
+    this.#removeVmBackupsFromCache(backups)
   }

   deleteVmBackup(file) {
@@ -281,9 +303,6 @@ class RemoteAdapter {
       // don't merge in main process, unused VHDs will be merged in the next backup run
       await this.cleanVm(dir, { remove: true, logWarn: warn })
     }
-
-    const dedupedVmUuid = new Set(metadatas.map(_ => _.vm.uuid))
-    await asyncMap(dedupedVmUuid, vmUuid => this.invalidateVmBackupListCache(vmUuid))
   }

   #getCompressionType() {
```
```diff
@@ -458,11 +477,41 @@ class RemoteAdapter {
     return backupsByPool
   }

-  async _invalidateVmBackupListCacheDir(vmDir) {
-    await this.handler.unlink(`${vmDir}/cache.json.gz`)
+  #getVmBackupsCache(vmUuid) {
+    return `${BACKUP_DIR}/${vmUuid}/cache.json.gz`
   }

+  async #readCache(path) {
+    try {
+      return JSON.parse(await fromCallback(zlib.gunzip, await this.handler.readFile(path)))
+    } catch (error) {
+      if (error.code !== 'ENOENT') {
+        warn('#readCache', { error, path })
+      }
+    }
+  }
+
+  _updateCache = synchronized.withKey()(this._updateCache)
+  // eslint-disable-next-line no-dupe-class-members
+  async _updateCache(path, fn) {
+    const cache = await this.#readCache(path)
+    if (cache !== undefined) {
+      fn(cache)
+
+      await this.#writeCache(path, cache)
+    }
+  }
+
+  async #writeCache(path, data) {
+    try {
+      await this.handler.writeFile(path, await fromCallback(zlib.gzip, JSON.stringify(data)), { flags: 'w' })
+    } catch (error) {
+      warn('#writeCache', { error, path })
+    }
+  }
+
   async invalidateVmBackupListCache(vmUuid) {
-    await this._invalidateVmBackupListCacheDir(`${BACKUP_DIR}/${vmUuid}`)
+    await this.handler.unlink(this.#getVmBackupsCache(vmUuid))
   }

   async #getCachabledDataListVmBackups(dir) {
```
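`synchronized.withKey()` (presumably from the `decorator-synchronized` package) serializes `_updateCache` calls per cache path, so concurrent read-modify-write cycles on the same `cache.json.gz` cannot interleave; the `no-dupe-class-members` directive is needed because the wrapped class field and the method share a name. A hand-rolled sketch of that per-key queuing, assuming the key defaults to the first argument; this is an illustration, not the library's implementation:

```js
// Per-key serialization sketch: calls sharing a key run one after another.
const queues = new Map()

function withKey(fn) {
  return function (key, ...args) {
    const previous = queues.get(key) ?? Promise.resolve()
    const next = previous.then(() => fn.apply(this, [key, ...args]))
    // keep the queue usable even if this call rejects
    queues.set(key, next.catch(() => {}))
    return next
  }
}

// usage: concurrent updates to the same path cannot interleave
const updateCache = withKey(async (path, mutate) => {
  // read cache, mutate(cache), write cache back (elided)
})
updateCache('/xo-vm-backups/vm-1/cache.json.gz', cache => { /* ... */ })
updateCache('/xo-vm-backups/vm-1/cache.json.gz', cache => { /* ... */ })
```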
```diff
@@ -501,41 +550,25 @@ class RemoteAdapter {
   // if cache is missing or broken => regenerate it and return
   async _readCacheListVmBackups(vmUuid) {
-    const dir = `${BACKUP_DIR}/${vmUuid}`
-    const path = `${dir}/cache.json.gz`
+    const path = this.#getVmBackupsCache(vmUuid)

-    try {
-      const gzipped = await this.handler.readFile(path)
-      const text = await fromCallback(zlib.gunzip, gzipped)
-      return JSON.parse(text)
-    } catch (error) {
-      if (error.code !== 'ENOENT') {
-        warn('Cache file was unreadable', { vmUuid, error })
-      }
+    const cache = await this.#readCache(path)
+    if (cache !== undefined) {
+      return cache
     }

     // nothing cached, or cache unreadable => regenerate it
-    const backups = await this.#getCachabledDataListVmBackups(dir)
+    const backups = await this.#getCachabledDataListVmBackups(`${BACKUP_DIR}/${vmUuid}`)
     if (backups === undefined) {
       return
     }

     // detached async action, will not reject
-    this.#writeVmBackupsCache(path, backups)
+    this.#writeCache(path, backups)

     return backups
   }

-  async #writeVmBackupsCache(cacheFile, backups) {
-    try {
-      const text = JSON.stringify(backups)
-      const zipped = await fromCallback(zlib.gzip, text)
-      await this.handler.writeFile(cacheFile, zipped, { flags: 'w' })
-    } catch (error) {
-      warn('writeVmBackupsCache', { cacheFile, error })
-    }
-  }
-
   async listVmBackups(vmUuid, predicate) {
     const backups = []
     const cached = await this._readCacheListVmBackups(vmUuid)
```
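As the methods above show, the cache file is simply the VM's backup-metadata map, JSON-serialized and gzipped, read through on a miss and regenerated from the directory listing. A standalone round-trip of that format with Node's built-in `zlib`; the adapter itself goes through its remote handler and, presumably, `promise-toolbox`'s `fromCallback`, but the bytes are the same:

```js
// Round-trip of the cache.json.gz format; contents are illustrative.
const zlib = require('zlib')
const { promisify } = require('util')

const gzip = promisify(zlib.gzip)
const gunzip = promisify(zlib.gunzip)

async function main() {
  const backups = {
    '/xo-vm-backups/vm-1/20220801T000000Z.json': {
      id: '/xo-vm-backups/vm-1/20220801T000000Z.json',
      _filename: '/xo-vm-backups/vm-1/20220801T000000Z.json',
      timestamp: 1659312000000,
    },
  }

  const onDisk = await gzip(JSON.stringify(backups)) // what #writeCache stores
  const restored = JSON.parse((await gunzip(onDisk)).toString()) // what #readCache returns
  console.log(restored)
}

main()
```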
```diff
@@ -574,6 +607,28 @@ class RemoteAdapter {
     return backups.sort(compareTimestamp)
   }

+  async writeVmBackupMetadata(vmUuid, metadata) {
+    const path = `/${BACKUP_DIR}/${vmUuid}/${formatFilenameDate(metadata.timestamp)}.json`
+
+    await this.handler.outputFile(path, JSON.stringify(metadata), {
+      dirMode: this._dirMode,
+    })
+
+    // will not throw
+    this._updateCache(this.#getVmBackupsCache(vmUuid), backups => {
+      debug('adding cache entry', { entry: path })
+      backups[path] = {
+        ...metadata,
+
+        // these values are required in the cache
+        _filename: path,
+        id: path,
+      }
+    })
+
+    return path
+  }
+
   async writeVhd(path, input, { checksum = true, validator = noop } = {}) {
     const handler = this._handler
```
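The new method returns the metadata path and patches the VM's list cache as a side effect, which is what lets the writers below drop their own `outputFile` calls. A hypothetical call, assuming an already-constructed `RemoteAdapter` and illustrative field values:

```js
// Hypothetical usage; `adapter` is assumed to be an existing
// RemoteAdapter wired to a remote handler.
async function example(adapter) {
  const path = await adapter.writeVmBackupMetadata('vm-1', {
    jobId: 'job-1',
    mode: 'full',
    timestamp: Date.now(),
    vm: { uuid: 'vm-1' },
  })
  // `path` looks like /xo-vm-backups/vm-1/<formatted date>.json; the
  // VM's cache.json.gz now has an entry keyed by that path, carrying
  // the metadata plus the `_filename` and `id` fields listings rely on.
  return path
}
```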
@xen-orchestra/backups/_cleanVm.js

```diff
@@ -533,8 +533,7 @@ exports.cleanVm = async function cleanVm(

   // purge cache if a metadata file has been deleted
   if (mustInvalidateCache) {
-    // cleanvm is always invoked as a method of RemoteAdapter
-    await this._invalidateVmBackupListCacheDir(vmDir)
+    await handler.unlink(vmDir + '/cache.json.gz')
   }

   return {
```
@xen-orchestra/backups/writers/DeltaBackupWriter.js

```diff
@@ -158,7 +158,6 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
       }/${adapter.getVhdFileName(basename)}`
     )

-    const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
     const metadataContent = {
       jobId,
       mode: job.mode,
@@ -223,9 +222,7 @@ exports.DeltaBackupWriter = class DeltaBackupWriter extends MixinBackupWriter(Ab
       }
     })
     metadataContent.size = size
-    await handler.outputFile(metadataFilename, JSON.stringify(metadataContent), {
-      dirMode: backup.config.dirMode,
-    })
+    this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadataContent)

     // TODO: run cleanup?
   }
```
@xen-orchestra/backups/writers/FullBackupWriter.js

```diff
@@ -34,7 +34,6 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
     const { job, scheduleId, vm } = backup

     const adapter = this._adapter
-    const handler = adapter.handler
     const backupDir = getVmBackupDir(vm.uuid)

     // TODO: clean VM backup directory
@@ -50,7 +49,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
     const dataBasename = basename + '.xva'
     const dataFilename = backupDir + '/' + dataBasename

-    const metadataFilename = (this._metadataFileName = `${backupDir}/${basename}.json`)
+    const metadataFilename = `${backupDir}/${basename}.json`
     const metadata = {
       jobId: job.id,
       mode: job.mode,
@@ -74,9 +73,7 @@ exports.FullBackupWriter = class FullBackupWriter extends MixinBackupWriter(Abst
       return { size: sizeContainer.size }
     })
     metadata.size = sizeContainer.size
-    await handler.outputFile(metadataFilename, JSON.stringify(metadata), {
-      dirMode: backup.config.dirMode,
-    })
+    this._metadataFileName = await adapter.writeVmBackupMetadata(vm.uuid, metadata)

     if (!deleteFirst) {
       await deleteOldBackups()
```
@xen-orchestra/backups/writers/_MixinBackupWriter.js

```diff
@@ -74,7 +74,6 @@ exports.MixinBackupWriter = (BaseClass = Object) =>
         const remotePath = handler._getRealPath()
         await MergeWorker.run(remotePath)
       }
-      await this._adapter.invalidateVmBackupListCache(this._backup.vm.uuid)
     }

     healthCheck(sr) {
```
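With creations (`writeVmBackupMetadata`) and deletions (`#removeVmBackupsFromCache`) now patching the cache in place, the unconditional invalidation after each backup run is redundant, hence its removal here; this is what the changelog entry below refers to. A toy, in-memory illustration of the update-instead-of-regenerate strategy:

```js
// Toy illustration only; the real cache lives gzipped on the remote.
const cache = {}

function onBackupWritten(path, metadata) {
  cache[path] = { ...metadata, _filename: path, id: path } // add entry
}

function onBackupsDeleted(paths) {
  for (const path of paths) delete cache[path] // drop entries
}

onBackupWritten('/xo-vm-backups/vm-1/a.json', { timestamp: 1 })
onBackupWritten('/xo-vm-backups/vm-1/b.json', { timestamp: 2 })
onBackupsDeleted(['/xo-vm-backups/vm-1/a.json'])
console.log(Object.keys(cache)) // [ '/xo-vm-backups/vm-1/b.json' ]
```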
CHANGELOG.unreleased.md

```diff
@@ -7,6 +7,8 @@

 > Users must be able to say: “Nice enhancement, I'm eager to test it”

+- [Backup] Improve listing speed by updating caches instead of regenerating them on backup creation/deletion (PR [#6411](https://github.com/vatesfr/xen-orchestra/pull/6411))
+
 ### Bug fixes

 > Users must be able to say: “I had this issue, happy to know it's fixed”
```