feat(xo-server/metadata-backups): ability to restore metadata backup (#4096)

See #4004
This commit is contained in:
badrAZ
2019-03-29 11:21:03 +01:00
committed by Julien Fontanet
parent c2beb2a5fa
commit 2886ec116f
7 changed files with 331 additions and 16 deletions

View File

@@ -26,6 +26,7 @@
### Released packages
- xen-api v0.24.6
- vhd-lib v0.6.0
- @xen-orchestra/fs v0.8.0
- xo-server-usage-report v0.7.2

View File

@@ -45,6 +45,10 @@ maxTokenValidity = '0.5 year'
# https://developer.mozilla.org/fr/docs/Web/HTTP/Headers/Set-Cookie#Session_cookie
#sessionCookieValidity = '10 hours'
[backup]
# Duration for which the backup listing on a remote is cached
listingDebounce = '1 min'
[[http.listen]]
port = 80

View File

@@ -101,3 +101,42 @@ runJob.params = {
type: 'string',
},
}
export async function list({ remotes }) {
return this.listMetadataBackups(remotes)
}
list.permission = 'admin'
list.params = {
remotes: {
type: 'array',
items: {
type: 'string',
},
},
}
export function restore({ id }) {
return this.restoreMetadataBackup(id)
}
restore.permission = 'admin'
restore.params = {
id: {
type: 'string',
},
}
function delete_({ id }) {
return this.deleteMetadataBackup(id)
}
delete_.permission = 'admin'
delete_.params = {
id: {
type: 'string',
},
}
export { delete_ as delete }

View File

@@ -1,14 +1,25 @@
import { cancelable } from 'promise-toolbox'
const PATH_DB_DUMP = '/pool/xmldbdump'
export default {
@cancelable
exportPoolMetadata($cancelToken) {
const { pool } = this
return this.getResource($cancelToken, '/pool/xmldbdump', {
task: this.createTask(
'Pool metadata',
pool.name_label ?? pool.$master.name_label
),
return this.getResource($cancelToken, PATH_DB_DUMP, {
task: this.createTask('Export pool metadata'),
})
},
// Restore the XAPI database from an XML backup
//
// See https://github.com/xapi-project/xen-api/blob/405b02e72f1ccc4f4b456fd52db30876faddcdd8/ocaml/xapi/pool_db_backup.ml#L170-L205
@cancelable
importPoolMetadata($cancelToken, stream, force = false) {
return this.putResource($cancelToken, stream, PATH_DB_DUMP, {
query: {
dry_run: String(!force),
},
task: this.createTask('Import pool metadata'),
})
},
}

View File

@@ -59,7 +59,7 @@ const taskTimeComparator = ({ start: s1, end: e1 }, { start: s2, end: e2 }) => {
// id: string,
// jobId?: string,
// jobName?: string,
// message?: 'restore',
// message?: 'backup' | 'metadataRestore' | 'restore',
// scheduleId?: string,
// start: number,
// status: 'pending' | 'failure' | 'interrupted' | 'skipped' | 'success',
@@ -67,12 +67,13 @@ const taskTimeComparator = ({ start: s1, end: e1 }, { start: s2, end: e2 }) => {
// }
export default {
async getBackupNgLogs(runId?: string) {
const [jobLogs, restoreLogs] = await Promise.all([
const [jobLogs, restoreLogs, restoreMetadataLogs] = await Promise.all([
this.getLogs('jobs'),
this.getLogs('restore'),
this.getLogs('metadataRestore'),
])
const { runningJobs, runningRestores } = this
const { runningJobs, runningRestores, runningMetadataRestores } = this
const consolidated = {}
const started = {}
@@ -89,6 +90,7 @@ export default {
id,
jobId,
jobName: data.jobName,
message: 'backup',
scheduleId,
start: time,
status: runningJobs[jobId] === id ? 'pending' : 'interrupted',
@@ -117,7 +119,8 @@ export default {
if (parentId === undefined && (runId === undefined || runId === id)) {
// top level task
task.status =
message === 'restore' && !runningRestores.has(id)
(message === 'restore' && !runningRestores.has(id)) ||
(message === 'metadataRestore' && !runningMetadataRestores.has(id))
? 'interrupted'
: 'pending'
consolidated[id] = started[id] = task
@@ -184,6 +187,7 @@ export default {
forEach(jobLogs, handleLog)
forEach(restoreLogs, handleLog)
forEach(restoreMetadataLogs, handleLog)
return runId === undefined ? consolidated : consolidated[runId]
},

View File

@@ -1,11 +1,15 @@
// @flow
import asyncMap from '@xen-orchestra/async-map'
import createLogger from '@xen-orchestra/log'
import defer from 'golike-defer'
import { fromEvent, ignoreErrors } from 'promise-toolbox'
import debounceWithKey from '../_pDebounceWithKey'
import parseDuration from '../_parseDuration'
import { type Xapi } from '../xapi'
import {
safeDateFormat,
serializeError,
type SimpleIdPattern,
unboxIdsFromPattern,
} from '../utils'
@@ -13,8 +17,14 @@ import {
import { type Executor, type Job } from './jobs'
import { type Schedule } from './scheduling'
const log = createLogger('xo:xo-mixins:metadata-backups')
const DIR_XO_CONFIG_BACKUPS = 'xo-config-backups'
const DIR_XO_POOL_METADATA_BACKUPS = 'xo-pool-metadata-backups'
const METADATA_BACKUP_JOB_TYPE = 'metadataBackup'
const compareTimestamp = (a, b) => a.timestamp - b.timestamp
type Settings = {|
retentionXoMetadata?: number,
retentionPoolMetadata?: number,
@@ -29,6 +39,26 @@ type MetadataBackupJob = {
xoMetadata?: boolean,
}
const createSafeReaddir = (handler, methodName) => (path, options) =>
handler.list(path, options).catch(error => {
if (error?.code !== 'ENOENT') {
log.warn(`${methodName} ${path}`, { error })
}
return []
})
// metadata.json
//
// {
// jobId: String,
// jobName: String,
// scheduleId: String,
// scheduleName: String,
// timestamp: number,
// pool?: <Pool />
// poolMaster?: <Host />
// }
//
// File structure on remotes:
//
// <remote>
@@ -43,7 +73,6 @@ type MetadataBackupJob = {
// └─ <YYYYMMDD>T<HHmmss>
// ├─ metadata.json
// └─ data
export default class metadataBackup {
_app: {
createJob: (
@@ -63,9 +92,30 @@ export default class metadataBackup {
removeJob: (id: string) => Promise<void>,
}
constructor(app: any) {
get runningMetadataRestores() {
return this._runningMetadataRestores
}
constructor(app: any, { backup }) {
this._app = app
app.on('start', () => {
this._logger = undefined
this._runningMetadataRestores = new Set()
const debounceDelay = parseDuration(backup.listingDebounce)
this._listXoMetadataBackups = debounceWithKey(
this._listXoMetadataBackups,
debounceDelay,
remoteId => remoteId
)
this.__listPoolMetadataBackups = debounceWithKey(
this._listPoolMetadataBackups,
debounceDelay,
remoteId => remoteId
)
app.on('start', async () => {
this._logger = await app.getLogger('metadataRestore')
app.registerJobExecutor(
METADATA_BACKUP_JOB_TYPE,
this._executor.bind(this)
@@ -106,7 +156,7 @@ export default class metadataBackup {
const files = []
if (job.xoMetadata && retentionXoMetadata > 0) {
const xoMetadataDir = `xo-config-backups/${schedule.id}`
const xoMetadataDir = `${DIR_XO_CONFIG_BACKUPS}/${schedule.id}`
const dir = `${xoMetadataDir}/${formattedTimestamp}`
const data = JSON.stringify(await app.exportConfig(), null, 2)
@@ -131,7 +181,7 @@ export default class metadataBackup {
files.push(
...(await Promise.all(
poolIds.map(async id => {
const poolMetadataDir = `xo-pool-metadata-backups/${
const poolMetadataDir = `${DIR_XO_POOL_METADATA_BACKUPS}/${
schedule.id
}/${id}`
const dir = `${poolMetadataDir}/${formattedTimestamp}`
@@ -261,4 +311,210 @@ export default class metadataBackup {
}),
])
}
// xoBackups
// [{
// id: `${remoteId}/folderPath`,
// jobId,
// jobName,
// scheduleId,
// scheduleName,
// timestamp
// }]
// List the XO config backups stored on the given remote.
//
// Walks <remote>/xo-config-backups/<schedule id>/<timestamp dir> and reads
// each directory's metadata.json; entries whose metadata cannot be read or
// parsed are logged and skipped. Each returned entry is the parsed metadata
// plus an `id` of the form `${remoteId}${backupDir}` (backupDir starts with
// a '/' because of `prependDir`). The result is sorted by ascending
// timestamp.
//
// NOTE(review): relies on `asyncMap` accepting a promise of a collection
// (safeReaddir returns a promise) — confirm against @xen-orchestra/async-map.
async _listXoMetadataBackups(remoteId, handler) {
// safeReaddir resolves to [] on listing errors instead of rejecting
const safeReaddir = createSafeReaddir(handler, 'listXoMetadataBackups')
const backups = []
await asyncMap(
safeReaddir(DIR_XO_CONFIG_BACKUPS, { prependDir: true }),
scheduleDir =>
asyncMap(
safeReaddir(scheduleDir, { prependDir: true }),
async backupDir => {
try {
backups.push({
id: `${remoteId}${backupDir}`,
...JSON.parse(
String(await handler.readFile(`${backupDir}/metadata.json`))
),
})
} catch (error) {
// unreadable/corrupted metadata: skip this backup, keep the rest
log.warn(`listXoMetadataBackups ${backupDir}`, { error })
}
}
)
)
return backups.sort(compareTimestamp)
}
// poolBackups
// {
// [<Pool ID>]: [{
// id: `${remoteId}/folderPath`,
// jobId,
// jobName,
// scheduleId,
// scheduleName,
// timestamp,
// pool,
// poolMaster,
// }]
// }
async _listPoolMetadataBackups(remoteId, handler) {
const safeReaddir = createSafeReaddir(handler, 'listXoMetadataBackups')
const backupsByPool = {}
await asyncMap(
safeReaddir(DIR_XO_POOL_METADATA_BACKUPS, { prependDir: true }),
scheduleDir =>
asyncMap(safeReaddir(scheduleDir), poolId => {
const backups = backupsByPool[poolId] ?? (backupsByPool[poolId] = [])
return asyncMap(
safeReaddir(`${scheduleDir}/${poolId}`, { prependDir: true }),
async backupDir => {
try {
backups.push({
id: `${remoteId}${backupDir}`,
...JSON.parse(
String(await handler.readFile(`${backupDir}/metadata.json`))
),
})
} catch (error) {
log.warn(`listPoolMetadataBackups ${backupDir}`, {
error,
})
}
}
)
})
)
// delete empty entries and sort backups
Object.keys(backupsByPool).forEach(poolId => {
const backups = backupsByPool[poolId]
if (backups.length === 0) {
delete backupsByPool[poolId]
} else {
backups.sort(compareTimestamp)
}
})
return backupsByPool
}
// {
// xo: {
// [remote ID]: xoBackups
// },
// pool: {
// [remote ID]: poolBackups
// }
// }
async listMetadataBackups(remoteIds: string[]) {
const app = this._app
const xo = {}
const pool = {}
await Promise.all(
remoteIds.map(async remoteId => {
try {
const handler = await app.getRemoteHandler(remoteId)
const [xoList, poolList] = await Promise.all([
this._listXoMetadataBackups(remoteId, handler),
this._listPoolMetadataBackups(remoteId, handler),
])
if (xoList.length !== 0) {
xo[remoteId] = xoList
}
if (Object.keys(poolList).length !== 0) {
pool[remoteId] = poolList
}
} catch (error) {
log.warn(`listMetadataBackups for remote ${remoteId}`, { error })
}
})
)
return {
xo,
pool,
}
}
// Task logs emitted in a restore execution:
//
// task.start(message: 'metadataRestore', data: <Metadata />)
// └─ task.end
//
// `id` is `<remote id>/<backups dir>/…/<timestamp dir>` — the id returned
// by listMetadataBackups. The task id is tracked in
// _runningMetadataRestores so getBackupNgLogs can tell pending restores
// from interrupted ones.
async restoreMetadataBackup(id: string) {
const app = this._app
const logger = this._logger
const message = 'metadataRestore'
// first path segment selects the remote, second the backups dir kind
const [remoteId, dir, ...path] = id.split('/')
const handler = await app.getRemoteHandler(remoteId)
const metadataFolder = `${dir}/${path.join('/')}`
// task payload is the backup's own metadata.json
const taskId = logger.notice(message, {
event: 'task.start',
data: JSON.parse(
String(await handler.readFile(`${metadataFolder}/metadata.json`))
),
})
try {
this._runningMetadataRestores.add(taskId)
let result
if (dir === DIR_XO_CONFIG_BACKUPS) {
// XO config backup: data.json holds the exported XO config
result = await app.importConfig(
JSON.parse(
String(await handler.readFile(`${metadataFolder}/data.json`))
)
)
} else {
// pool metadata backup: path[1] is the pool id, used to pick the
// XAPI connection; second arg (force=true) skips the dry run
result = await app
.getXapi(path[1])
.importPoolMetadata(
await handler.createReadStream(`${metadataFolder}/data`),
true
)
}
logger.notice(message, {
event: 'task.end',
result,
status: 'success',
taskId,
})
} catch (error) {
logger.error(message, {
event: 'task.end',
result: serializeError(error),
status: 'failure',
taskId,
})
// rethrow so the API call also reports the failure
throw error
} finally {
this._runningMetadataRestores.delete(taskId)
}
}
async deleteMetadataBackup(id: string) {
const uuidReg = '\\w{8}(-\\w{4}){3}-\\w{12}'
const metadataDirReg = 'xo-(config|pool-metadata)-backups'
const timestampReg = '\\d{8}T\\d{6}Z'
const regexp = new RegExp(
`^/?${uuidReg}/${metadataDirReg}/${uuidReg}(/${uuidReg})?/${timestampReg}`
)
if (!regexp.test(id)) {
throw new Error(`The id (${id}) not correspond to a metadata folder`)
}
const app = this._app
const [remoteId, ...path] = id.split('/')
const handler = await app.getRemoteHandler(remoteId)
return handler.rmtree(path.join('/'))
}
}

View File

@@ -153,7 +153,7 @@ export default decorate([
addSubscriptions({
logs: cb =>
subscribeBackupNgLogs(logs =>
cb(logs && filter(logs, log => log.message !== 'restore'))
cb(logs && filter(logs, log => log.message === 'backup'))
),
jobs: cb => subscribeBackupNgJobs(jobs => cb(keyBy(jobs, 'id'))),
metadataJobs: cb =>