From ef942a620932a9b8f7a380a73fe05b6b79f723ce Mon Sep 17 00:00:00 2001 From: Julien Fontanet Date: Tue, 15 May 2018 14:40:11 +0200 Subject: [PATCH] feat(Backup NG): implement logs and reports (#2869) --- flow-typed/lodash.js | 8 + .../xo-server-backup-reports/package.json | 5 +- .../xo-server-backup-reports/src/index.js | 410 ++++++++-- packages/xo-server/src/api/backup-ng.js | 8 + packages/xo-server/src/api/log.js | 18 +- .../xo-server/src/schemas/log/jobStart.js | 7 +- packages/xo-server/src/schemas/log/taskEnd.js | 18 + .../xo-server/src/schemas/log/taskStart.js | 15 + packages/xo-server/src/xapi/index.js.flow | 2 +- .../src/xo-mixins/backups-ng/index.js | 725 +++++++++++------- .../xo-server/src/xo-mixins/jobs/index.js | 23 +- .../xo-server/src/xo-mixins/logs/index.js | 26 +- packages/xo-web/src/common/intl/messages.js | 23 + packages/xo-web/src/common/xo/index.js | 4 + packages/xo-web/src/xo-app/backup-ng/index.js | 2 +- .../xo-web/src/xo-app/backup-ng/new/index.js | 61 +- .../xo-web/src/xo-app/logs/backup-ng-logs.js | 199 +++++ packages/xo-web/src/xo-app/logs/index.js | 23 +- .../xo-web/src/xo-app/logs/log-alert-body.js | 348 +++++++++ packages/xo-web/src/xo-app/logs/utils.js | 7 + 20 files changed, 1591 insertions(+), 341 deletions(-) create mode 100644 packages/xo-server/src/schemas/log/taskEnd.js create mode 100644 packages/xo-server/src/schemas/log/taskStart.js create mode 100644 packages/xo-web/src/xo-app/logs/backup-ng-logs.js create mode 100644 packages/xo-web/src/xo-app/logs/log-alert-body.js create mode 100644 packages/xo-web/src/xo-app/logs/utils.js diff --git a/flow-typed/lodash.js b/flow-typed/lodash.js index b990fb648..fc94fffc4 100644 --- a/flow-typed/lodash.js +++ b/flow-typed/lodash.js @@ -1,4 +1,12 @@ declare module 'lodash' { + declare export function forEach( + object: { [K]: V }, + iteratee: (V, K) => void + ): void + declare export function groupBy( + object: { [K]: V }, + iteratee: K | ((V, K) => string) + ): { [string]: V[] } declare 
export function invert(object: { [K]: V }): { [V]: K } declare export function isEmpty(mixed): boolean declare export function keyBy(array: T[], iteratee: string): boolean diff --git a/packages/xo-server-backup-reports/package.json b/packages/xo-server-backup-reports/package.json index 1025c8c9a..135b5a6ee 100644 --- a/packages/xo-server-backup-reports/package.json +++ b/packages/xo-server-backup-reports/package.json @@ -35,6 +35,7 @@ "node": ">=4" }, "dependencies": { + "babel-runtime": "^6.26.0", "human-format": "^0.10.0", "lodash": "^4.13.1", "moment-timezone": "^0.5.13" @@ -42,6 +43,7 @@ "devDependencies": { "babel-cli": "^6.24.1", "babel-plugin-lodash": "^3.3.2", + "babel-plugin-transform-runtime": "^6.23.0", "babel-preset-env": "^1.5.2", "cross-env": "^5.1.3", "rimraf": "^2.6.1" @@ -56,7 +58,8 @@ }, "babel": { "plugins": [ - "lodash" + "lodash", + "transform-runtime" ], "presets": [ [ diff --git a/packages/xo-server-backup-reports/src/index.js b/packages/xo-server-backup-reports/src/index.js index c959cc09b..f42dcc8e7 100644 --- a/packages/xo-server-backup-reports/src/index.js +++ b/packages/xo-server-backup-reports/src/index.js @@ -1,6 +1,6 @@ import humanFormat from 'human-format' import moment from 'moment-timezone' -import { forEach, startCase } from 'lodash' +import { find, forEach, get, startCase } from 'lodash' import pkg from '../package' @@ -41,9 +41,9 @@ const DATE_FORMAT = 'dddd, MMMM Do YYYY, h:mm:ss a' const createDateFormater = timezone => timezone !== undefined ? 
timestamp => - moment(timestamp) - .tz(timezone) - .format(DATE_FORMAT) + moment(timestamp) + .tz(timezone) + .format(DATE_FORMAT) : timestamp => moment(timestamp).format(DATE_FORMAT) const formatDuration = milliseconds => moment.duration(milliseconds).humanize() @@ -66,6 +66,7 @@ const logError = e => { console.error('backup report error:', e) } +const NO_VMS_MATCH_THIS_PATTERN = 'no VMs match this pattern' const NO_SUCH_OBJECT_ERROR = 'no such object' const UNHEALTHY_VDI_CHAIN_ERROR = 'unhealthy VDI chain' const UNHEALTHY_VDI_CHAIN_MESSAGE = @@ -94,14 +95,351 @@ class BackupReportsXoPlugin { this._xo.removeListener('job:terminated', this._report) } - _wrapper (status) { - return new Promise(resolve => resolve(this._listener(status))).catch( - logError - ) + _wrapper (status, job, schedule) { + return new Promise(resolve => + resolve( + job.type === 'backup' + ? this._backupNgListener(status, job, schedule) + : this._listener(status, job, schedule) + ) + ).catch(logError) + } + + async _backupNgListener (runJobId, _, { timezone }) { + const xo = this._xo + const logs = await xo.getBackupNgLogs(runJobId) + const jobLog = logs['roots'][0] + const vmsTaskLog = logs[jobLog.id] + + const { reportWhen, mode } = jobLog.data || {} + if (reportWhen === 'never') { + return + } + + const formatDate = createDateFormater(timezone) + const jobName = (await xo.getJob(jobLog.jobId, 'backup')).name + + if (jobLog.error !== undefined) { + const [globalStatus, icon] = + jobLog.error.message === NO_VMS_MATCH_THIS_PATTERN + ? 
['Skipped', ICON_SKIPPED] + : ['Failure', ICON_FAILURE] + let markdown = [ + `## Global status: ${globalStatus}`, + '', + `- **mode**: ${mode}`, + `- **Start time**: ${formatDate(jobLog.start)}`, + `- **End time**: ${formatDate(jobLog.end)}`, + `- **Duration**: ${formatDuration(jobLog.duration)}`, + `- **Error**: ${jobLog.error.message}`, + '---', + '', + `*${pkg.name} v${pkg.version}*`, + ] + + markdown = markdown.join('\n') + return this._sendReport({ + subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${jobName} ${icon}`, + markdown, + nagiosStatus: 2, + nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Backup report for ${jobName} - Error : ${ + jobLog.error.message + }`, + }) + } + + const failedVmsText = [] + const skippedVmsText = [] + const successfulVmsText = [] + const nagiosText = [] + + let globalMergeSize = 0 + let globalTransferSize = 0 + let nFailures = 0 + let nSkipped = 0 + + for (const vmTaskLog of vmsTaskLog || []) { + const vmTaskStatus = vmTaskLog.status + if (vmTaskStatus === 'success' && reportWhen === 'failure') { + return + } + + const vmId = vmTaskLog.data.id + let vm + try { + vm = xo.getObject(vmId) + } catch (e) {} + const text = [ + `### ${vm !== undefined ? vm.name_label : 'VM not found'}`, + '', + `- **UUID**: ${vm !== undefined ? vm.uuid : vmId}`, + `- **Start time**: ${formatDate(vmTaskLog.start)}`, + `- **End time**: ${formatDate(vmTaskLog.end)}`, + `- **Duration**: ${formatDuration(vmTaskLog.duration)}`, + ] + + const failedSubTasks = [] + const operationsText = [] + const srsText = [] + const remotesText = [] + for (const subTaskLog of logs[vmTaskLog.taskId] || []) { + const { data, status, result, message } = subTaskLog + const icon = + subTaskLog.status === 'success' ? 
ICON_SUCCESS : ICON_FAILURE + const errorMessage = ` **Error**: ${get(result, 'message')}` + + if (message === 'snapshot') { + operationsText.push(`- **Snapshot** ${icon}`) + if (status === 'failure') { + failedSubTasks.push('Snapshot') + operationsText.push('', errorMessage) + } + } else if (data.type === 'remote') { + const remoteId = data.id + const remote = await xo.getRemote(remoteId).catch(() => {}) + remotesText.push( + `- **${ + remote !== undefined ? remote.name : `Remote Not found` + }** (${remoteId}) ${icon}` + ) + if (status === 'failure') { + failedSubTasks.push(remote !== undefined ? remote.name : remoteId) + remotesText.push('', errorMessage) + } + } else { + const srId = data.id + let sr + try { + sr = xo.getObject(srId) + } catch (e) {} + const [srName, srUuid] = + sr !== undefined ? [sr.name_label, sr.uuid] : [`SR Not found`, srId] + srsText.push(`- **${srName}** (${srUuid}) ${icon}`) + if (status === 'failure') { + failedSubTasks.push(sr !== undefined ? sr.name_label : srId) + srsText.push('', errorMessage) + } + } + } + + if (operationsText.length !== 0) { + operationsText.unshift(`#### Operations`, '') + } + if (srsText.length !== 0) { + srsText.unshift(`#### SRs`, '') + } + if (remotesText.length !== 0) { + remotesText.unshift(`#### remotes`, '') + } + const subText = [...operationsText, '', ...srsText, '', ...remotesText] + const result = vmTaskLog.result + if (vmTaskStatus === 'failure' && result !== undefined) { + const { message } = result + if (isSkippedError(result)) { + ++nSkipped + skippedVmsText.push( + ...text, + `- **Reason**: ${ + message === UNHEALTHY_VDI_CHAIN_ERROR + ? UNHEALTHY_VDI_CHAIN_MESSAGE + : message + }`, + '' + ) + nagiosText.push( + `[(Skipped) ${ + vm !== undefined ? vm.name_label : 'undefined' + } : ${message} ]` + ) + } else { + ++nFailures + failedVmsText.push(...text, `- **Error**: ${message}`, '') + + nagiosText.push( + `[(Failed) ${ + vm !== undefined ? 
vm.name_label : 'undefined' + } : ${message} ]` + ) + } + } else { + let transferSize, transferDuration, mergeSize, mergeDuration + + forEach(logs[vmTaskLog.taskId], ({ taskId }) => { + if (transferSize !== undefined) { + return false + } + + const transferTask = find(logs[taskId], { message: 'transfer' }) + if (transferTask !== undefined) { + transferSize = transferTask.result.size + transferDuration = transferTask.end - transferTask.start + } + + const mergeTask = find(logs[taskId], { message: 'merge' }) + if (mergeTask !== undefined) { + mergeSize = mergeTask.result.size + mergeDuration = mergeTask.end - mergeTask.start + } + }) + if (transferSize !== undefined) { + globalTransferSize += transferSize + text.push( + `- **Transfer size**: ${formatSize(transferSize)}`, + `- **Transfer speed**: ${formatSpeed( + transferSize, + transferDuration + )}` + ) + } + if (mergeSize !== undefined) { + globalMergeSize += mergeSize + text.push( + `- **Merge size**: ${formatSize(mergeSize)}`, + `- **Merge speed**: ${formatSpeed(mergeSize, mergeDuration)}` + ) + } + if (vmTaskStatus === 'failure') { + ++nFailures + failedVmsText.push(...text, '', '', ...subText, '') + nagiosText.push( + `[(Failed) ${ + vm !== undefined ? vm.name_label : 'undefined' + }: (failed)[${failedSubTasks.toString()}]]` + ) + } else { + successfulVmsText.push(...text, '', '', ...subText, '') + } + } + } + const globalSuccess = nFailures === 0 && nSkipped === 0 + if (reportWhen === 'failure' && globalSuccess) { + return + } + + const nVms = vmsTaskLog.length + const nSuccesses = nVms - nFailures - nSkipped + const globalStatus = globalSuccess + ? `Success` + : nFailures !== 0 ? 
`Failure` : `Skipped` + let markdown = [ + `## Global status: ${globalStatus}`, + '', + `- **mode**: ${mode}`, + `- **Start time**: ${formatDate(jobLog.start)}`, + `- **End time**: ${formatDate(jobLog.end)}`, + `- **Duration**: ${formatDuration(jobLog.duration)}`, + `- **Successes**: ${nSuccesses} / ${nVms}`, + ] + + if (globalTransferSize !== 0) { + markdown.push(`- **Transfer size**: ${formatSize(globalTransferSize)}`) + } + if (globalMergeSize !== 0) { + markdown.push(`- **Merge size**: ${formatSize(globalMergeSize)}`) + } + markdown.push('') + + if (nFailures !== 0) { + markdown.push( + '---', + '', + `## ${nFailures} Failure${nFailures === 1 ? '' : 's'}`, + '', + ...failedVmsText + ) + } + + if (nSkipped !== 0) { + markdown.push('---', '', `## ${nSkipped} Skipped`, '', ...skippedVmsText) + } + + if (nSuccesses !== 0 && reportWhen !== 'failure') { + markdown.push( + '---', + '', + `## ${nSuccesses} Success${nSuccesses === 1 ? '' : 'es'}`, + '', + ...successfulVmsText + ) + } + + markdown.push('---', '', `*${pkg.name} v${pkg.version}*`) + markdown = markdown.join('\n') + return this._sendReport({ + markdown, + subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${jobName} ${ + globalSuccess + ? ICON_SUCCESS + : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED + }`, + nagiosStatus: globalSuccess ? 0 : 2, + nagiosMarkdown: globalSuccess + ? `[Xen Orchestra] [Success] Backup report for ${jobName}` + : `[Xen Orchestra] [${ + nFailures !== 0 ? 
'Failure' : 'Skipped' + }] Backup report for ${jobName} - VMs : ${nagiosText.join(' ')}`, + }) + } + + _sendReport ({ markdown, subject, nagiosStatus, nagiosMarkdown }) { + const xo = this._xo + return Promise.all([ + xo.sendEmail !== undefined && + xo.sendEmail({ + to: this._mailsReceivers, + subject, + markdown, + }), + xo.sendToXmppClient !== undefined && + xo.sendToXmppClient({ + to: this._xmppReceivers, + message: markdown, + }), + xo.sendSlackMessage !== undefined && + xo.sendSlackMessage({ + message: markdown, + }), + xo.sendPassiveCheck !== undefined && + xo.sendPassiveCheck({ + nagiosStatus, + message: nagiosMarkdown, + }), + ]) } _listener (status) { - const { calls } = status + const { calls, timezone, error } = status + const formatDate = createDateFormater(timezone) + + if (status.error !== undefined) { + const [globalStatus, icon] = + error.message === NO_VMS_MATCH_THIS_PATTERN + ? ['Skipped', ICON_SKIPPED] + : ['Failure', ICON_FAILURE] + + let markdown = [ + `## Global status: ${globalStatus}`, + '', + `- **Start time**: ${formatDate(status.start)}`, + `- **End time**: ${formatDate(status.end)}`, + `- **Duration**: ${formatDuration(status.end - status.start)}`, + `- **Error**: ${error.message}`, + '---', + '', + `*${pkg.name} v${pkg.version}*`, + ] + + markdown = markdown.join('\n') + return this._sendReport({ + subject: `[Xen Orchestra] ${globalStatus} ${icon}`, + markdown, + nagiosStatus: 2, + nagiosMarkdown: `[Xen Orchestra] [${globalStatus}] Error : ${ + error.message + }`, + }) + } + const callIds = Object.keys(calls) const nCalls = callIds.length @@ -139,8 +477,6 @@ class BackupReportsXoPlugin { const skippedBackupsText = [] const successfulBackupText = [] - const formatDate = createDateFormater(status.timezone) - forEach(calls, call => { const { id = call.params.vm } = call.params @@ -226,9 +562,8 @@ class BackupReportsXoPlugin { return } - const { end, start } = status const { tag } = oneCall.params - const duration = end - start + const 
duration = status.end - status.start const nSuccesses = nCalls - nFailures - nSkipped const globalStatus = globalSuccess ? `Success` @@ -238,8 +573,8 @@ class BackupReportsXoPlugin { `## Global status: ${globalStatus}`, '', `- **Type**: ${formatMethod(method)}`, - `- **Start time**: ${formatDate(start)}`, - `- **End time**: ${formatDate(end)}`, + `- **Start time**: ${formatDate(status.start)}`, + `- **End time**: ${formatDate(status.end)}`, `- **Duration**: ${formatDuration(duration)}`, `- **Successes**: ${nSuccesses} / ${nCalls}`, ] @@ -285,37 +620,20 @@ class BackupReportsXoPlugin { markdown = markdown.join('\n') - const xo = this._xo - return Promise.all([ - xo.sendEmail !== undefined && - xo.sendEmail({ - to: this._mailsReceivers, - subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${ - globalSuccess - ? ICON_SUCCESS - : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED - }`, - markdown, - }), - xo.sendToXmppClient !== undefined && - xo.sendToXmppClient({ - to: this._xmppReceivers, - message: markdown, - }), - xo.sendSlackMessage !== undefined && - xo.sendSlackMessage({ - message: markdown, - }), - xo.sendPassiveCheck !== undefined && - xo.sendPassiveCheck({ - status: globalSuccess ? 0 : 2, - message: globalSuccess - ? `[Xen Orchestra] [Success] Backup report for ${tag}` - : `[Xen Orchestra] [${ - nFailures !== 0 ? 'Failure' : 'Skipped' - }] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`, - }), - ]) + return this._sendReport({ + markdown, + subject: `[Xen Orchestra] ${globalStatus} − Backup report for ${tag} ${ + globalSuccess + ? ICON_SUCCESS + : nFailures !== 0 ? ICON_FAILURE : ICON_SKIPPED + }`, + nagiosStatus: globalSuccess ? 0 : 2, + nagiosMarkdown: globalSuccess + ? `[Xen Orchestra] [Success] Backup report for ${tag}` + : `[Xen Orchestra] [${ + nFailures !== 0 ? 
'Failure' : 'Skipped' + }] Backup report for ${tag} - VMs : ${nagiosText.join(' ')}`, + }) } } diff --git a/packages/xo-server/src/api/backup-ng.js b/packages/xo-server/src/api/backup-ng.js index 9e1940fe2..535d10fd4 100644 --- a/packages/xo-server/src/api/backup-ng.js +++ b/packages/xo-server/src/api/backup-ng.js @@ -134,6 +134,14 @@ runJob.params = { // ----------------------------------------------------------------------------- +export function getAllLogs () { + return this.getBackupNgLogs() +} + +getAllLogs.permission = 'admin' + +// ----------------------------------------------------------------------------- + export function deleteVmBackup ({ id }) { return this.deleteVmBackupNg(id) } diff --git a/packages/xo-server/src/api/log.js b/packages/xo-server/src/api/log.js index 941685284..f495f5894 100644 --- a/packages/xo-server/src/api/log.js +++ b/packages/xo-server/src/api/log.js @@ -1,19 +1,5 @@ -export async function get ({ namespace }) { - const logger = await this.getLogger(namespace) - - return new Promise((resolve, reject) => { - const logs = {} - - logger - .createReadStream() - .on('data', data => { - logs[data.key] = data.value - }) - .on('end', () => { - resolve(logs) - }) - .on('error', reject) - }) +export function get ({ namespace }) { + return this.getLogs(namespace) } get.description = 'returns logs list for one namespace' diff --git a/packages/xo-server/src/schemas/log/jobStart.js b/packages/xo-server/src/schemas/log/jobStart.js index 13bab541d..3a5c156d4 100644 --- a/packages/xo-server/src/schemas/log/jobStart.js +++ b/packages/xo-server/src/schemas/log/jobStart.js @@ -16,6 +16,11 @@ export default { key: { type: 'string', }, + type: { + default: 'call', + enum: ['backup', 'call'], + }, + data: {}, }, - required: ['event', 'userId', 'jobId', 'key'], + required: ['event', 'userId', 'jobId'], } diff --git a/packages/xo-server/src/schemas/log/taskEnd.js b/packages/xo-server/src/schemas/log/taskEnd.js new file mode 100644 index 
000000000..746a7b7c5 --- /dev/null +++ b/packages/xo-server/src/schemas/log/taskEnd.js @@ -0,0 +1,18 @@ +export default { + $schema: 'http://json-schema.org/draft-04/schema#', + type: 'object', + properties: { + event: { + enum: ['task.end'], + }, + taskId: { + type: 'string', + description: 'identifier of this task', + }, + status: { + enum: ['canceled', 'failure', 'success'], + }, + result: {}, + }, + required: ['event', 'taskId', 'status'], +} diff --git a/packages/xo-server/src/schemas/log/taskStart.js b/packages/xo-server/src/schemas/log/taskStart.js new file mode 100644 index 000000000..3d4725e50 --- /dev/null +++ b/packages/xo-server/src/schemas/log/taskStart.js @@ -0,0 +1,15 @@ +export default { + $schema: 'http://json-schema.org/draft-04/schema#', + type: 'object', + properties: { + event: { + enum: ['task.start'], + }, + parentId: { + type: 'string', + description: 'identifier of the parent task or job', + }, + data: {}, + }, + required: ['event'], +} diff --git a/packages/xo-server/src/xapi/index.js.flow b/packages/xo-server/src/xapi/index.js.flow index 39eacb43a..dd68d710d 100644 --- a/packages/xo-server/src/xapi/index.js.flow +++ b/packages/xo-server/src/xapi/index.js.flow @@ -58,7 +58,7 @@ declare export class Xapi { _updateObjectMapProperty( object: XapiObject, property: string, - entries: $Dict + entries: $Dict ): Promise; _setObjectProperties( object: XapiObject, diff --git a/packages/xo-server/src/xo-mixins/backups-ng/index.js b/packages/xo-server/src/xo-mixins/backups-ng/index.js index cee3abf62..68eb760ec 100644 --- a/packages/xo-server/src/xo-mixins/backups-ng/index.js +++ b/packages/xo-server/src/xo-mixins/backups-ng/index.js @@ -6,7 +6,15 @@ import defer from 'golike-defer' import { type Pattern, createPredicate } from 'value-matcher' import { type Readable, PassThrough } from 'stream' import { basename, dirname } from 'path' -import { isEmpty, last, mapValues, noop, values } from 'lodash' +import { + forEach, + groupBy, + isEmpty, + last, + 
mapValues, + noop, + values, +} from 'lodash' import { timeout as pTimeout } from 'promise-toolbox' import Vhd, { chainVhd, @@ -33,10 +41,12 @@ import { import { translateLegacyJob } from './migration' type Mode = 'full' | 'delta' +type ReportWhen = 'always' | 'failure' | 'never' type Settings = {| deleteFirst?: boolean, exportRetention?: number, + reportWhen?: ReportWhen, snapshotRetention?: number, vmTimeout?: number, |} @@ -56,13 +66,6 @@ export type BackupJob = {| vms: Pattern, |} -type BackupResult = {| - mergeDuration: number, - mergeSize: number, - transferDuration: number, - transferSize: number, -|} - type MetadataBase = {| _filename?: string, jobId: string, @@ -87,6 +90,33 @@ type MetadataFull = {| |} type Metadata = MetadataDelta | MetadataFull +type ConsolidatedJob = {| + duration?: number, + end?: number, + error?: Object, + id: string, + jobId: string, + mode: Mode, + start: number, + type: 'backup' | 'call', + userId: string, +|} +type ConsolidatedTask = {| + data?: Object, + duration?: number, + end?: number, + parentId: string, + message: string, + result?: Object, + start: number, + status: 'canceled' | 'failure' | 'success', + taskId: string, +|} +type ConsolidatedBackupNgLog = { + roots: Array, + [parentId: string]: Array, +} + const compareSnapshotTime = (a: Vm, b: Vm): number => a.snapshot_time < b.snapshot_time ? -1 : 1 @@ -105,6 +135,7 @@ const getOldEntries = (retention: number, entries?: T[]): T[] => const defaultSettings: Settings = { deleteFirst: false, exportRetention: 0, + reportWhen: 'failure', snapshotRetention: 0, vmTimeout: 0, } @@ -283,6 +314,77 @@ const writeStream = async ( } } +const wrapTask = async (opts: any, task: Promise): Promise => { + const { data, logger, message, parentId, result } = opts + + const taskId = logger.notice(message, { + event: 'task.start', + parentId, + data, + }) + + return task.then( + value => { + logger.notice(message, { + event: 'task.end', + result: + result === undefined + ? 
value + : typeof result === 'function' ? result(value) : result, + status: 'success', + taskId, + }) + return task + }, + result => { + logger.error(message, { + event: 'task.end', + result: serializeError(result), + status: 'failure', + taskId, + }) + return task + } + ) +} + +const wrapTaskFn = ( + opts: any, + task: (...any) => Promise +): ((taskId: string, ...any) => Promise) => + async function () { + const { data, logger, message, parentId, result } = + typeof opts === 'function' ? opts.apply(this, arguments) : opts + + const taskId = logger.notice(message, { + event: 'task.start', + parentId, + data, + }) + + try { + const value = await task.apply(this, [taskId, ...arguments]) + logger.notice(message, { + event: 'task.end', + result: + result === undefined + ? value + : typeof result === 'function' ? result(value) : result, + status: 'success', + taskId, + }) + return value + } catch (result) { + logger.error(message, { + event: 'task.end', + result: serializeError(result), + status: 'failure', + taskId, + }) + throw result + } + } + // File structure on remotes: // // @@ -316,6 +418,7 @@ export default class BackupNg { getXapi: (id: string) => Xapi, getJob: ((id: string, 'backup') => Promise) & ((id: string, 'call') => Promise), + getLogs: (namespace: string) => Promise<{ [id: string]: Object }>, updateJob: (($Shape, ?boolean) => Promise) & (($Shape, ?boolean) => Promise), removeJob: (id: string) => Promise, @@ -349,82 +452,59 @@ export default class BackupNg { } const jobId = job.id const scheduleId = schedule.id - const status: Object = { - calls: {}, - runJobId, - start: Date.now(), - timezone: schedule.timezone, - } - const { calls } = status await asyncMap(vms, async vm => { - const { uuid } = vm - const method = 'backup-ng' - const params = { - id: uuid, - tag: job.name, - } - - const name = vm.name_label - const runCallId = logger.notice( + const { name_label: name, uuid } = vm + const taskId: string = logger.notice( `Starting backup of ${name}. 
(${jobId})`, { - event: 'jobCall.start', - method, - params, - runJobId, + event: 'task.start', + parentId: runJobId, + data: { + type: 'VM', + id: uuid, + }, } ) - const call: Object = (calls[runCallId] = { - method, - params, - start: Date.now(), - }) const vmCancel = cancelToken.fork() try { // $FlowFixMe injected $defer param - let p = this._backupVm(vmCancel.token, uuid, job, schedule) + let p = this._backupVm( + vmCancel.token, + uuid, + job, + schedule, + logger, + taskId + ) const vmTimeout: number = getSetting( job.settings, 'vmTimeout', uuid, - scheduleId + scheduleId, + logger, + taskId ) if (vmTimeout !== 0) { p = pTimeout.call(p, vmTimeout) } - const returnedValue = await p - logger.notice( - `Backuping ${name} (${runCallId}) is a success. (${jobId})`, - { - event: 'jobCall.end', - runJobId, - runCallId, - returnedValue, - } - ) - - call.returnedValue = returnedValue - call.end = Date.now() + await p + logger.notice(`Backuping ${name} is a success. (${jobId})`, { + event: 'task.end', + taskId, + status: 'success', + }) } catch (error) { vmCancel.cancel() - logger.notice( - `Backuping ${name} (${runCallId}) has failed. (${jobId})`, - { - event: 'jobCall.end', - runJobId, - runCallId, - error: Array.isArray(error) - ? error.map(serializeError) - : serializeError(error), - } - ) - - call.error = error - call.end = Date.now() + logger.error(`Backuping ${name} has failed. (${jobId})`, { + event: 'task.end', + taskId, + status: 'failure', + result: Array.isArray(error) + ? 
error.map(serializeError) + : serializeError(error), + }) } }) - status.end = Date.now() - return status } app.registerJobExecutor('backup', executor) }) @@ -618,8 +698,10 @@ export default class BackupNg { $cancelToken: any, vmUuid: string, job: BackupJob, - schedule: Schedule - ): Promise { + schedule: Schedule, + logger: any, + taskId: string + ): Promise { const app = this._app const xapi = app.getXapi(vmUuid) const vm: Vm = (xapi.getObject(vmUuid): any) @@ -660,10 +742,18 @@ export default class BackupNg { await xapi._assertHealthyVdiChains(vm) - let snapshot: Vm = (await xapi._snapshotVm( - $cancelToken, - vm, - `[XO Backup ${job.name}] ${vm.name_label}` + let snapshot: Vm = (await wrapTask( + { + parentId: taskId, + logger, + message: 'snapshot', + result: _ => _.uuid, + }, + xapi._snapshotVm( + $cancelToken, + vm, + `[XO Backup ${job.name}] ${vm.name_label}` + ) ): any) await xapi._updateObjectMapProperty(snapshot, 'other_config', { 'xo:backup:job': jobId, @@ -686,12 +776,7 @@ export default class BackupNg { snapshot = ((await xapi.barrier(snapshot.$ref): any): Vm) if (exportRetention === 0) { - return { - mergeDuration: 0, - mergeSize: 0, - transferDuration: 0, - transferSize: 0, - } + return } const remotes = unboxIds(job.remotes) @@ -746,93 +831,123 @@ export default class BackupNg { const jsonMetadata = JSON.stringify(metadata) - const errors = [] await waitAll( [ - ...remotes.map(async remoteId => { - const fork = forkExport() - - const handler = await app.getRemoteHandler(remoteId) - - const oldBackups: MetadataFull[] = (getOldEntries( - exportRetention, - await this._listVmBackups( - handler, - vm, - _ => _.mode === 'full' && _.scheduleId === scheduleId - ) - ): any) - - const deleteFirst = getSetting(settings, 'deleteFirst', remoteId) - if (deleteFirst) { - await this._deleteFullVmBackups(handler, oldBackups) - } - - await writeStream(fork, handler, dataFilename) - - await handler.outputFile(metadataFilename, jsonMetadata) - - if (!deleteFirst) { - 
await this._deleteFullVmBackups(handler, oldBackups) - } - }), - ...srs.map(async srId => { - const fork = forkExport() - - const xapi = app.getXapi(srId) - const sr = xapi.getObject(srId) - - const oldVms = getOldEntries( - exportRetention, - listReplicatedVms(xapi, scheduleId, srId, vmUuid) - ) - - const deleteFirst = getSetting(settings, 'deleteFirst', srId) - if (deleteFirst) { - await this._deleteVms(xapi, oldVms) - } - - const vm = await xapi.barrier( - await xapi._importVm($cancelToken, fork, sr, vm => - xapi._setObjectProperties(vm, { - nameLabel: `${metadata.vm.name_label} (${safeDateFormat( - metadata.timestamp - )})`, - }) - ) - ) - - await Promise.all([ - xapi.addTag(vm.$ref, 'Disaster Recovery'), - xapi._updateObjectMapProperty(vm, 'blocked_operations', { - start: - 'Start operation for this vm is blocked, clone it if you want to use it.', + ...remotes.map( + wrapTaskFn( + id => ({ + data: { id, type: 'remote' }, + logger, + message: 'export', + parentId: taskId, }), - xapi._updateObjectMapProperty(vm, 'other_config', { - 'xo:backup:sr': srId, - }), - ]) + async (taskId, remoteId) => { + const fork = forkExport() - if (!deleteFirst) { - await this._deleteVms(xapi, oldVms) - } - }), + const handler = await app.getRemoteHandler(remoteId) + + const oldBackups: MetadataFull[] = (getOldEntries( + exportRetention, + await this._listVmBackups( + handler, + vm, + _ => _.mode === 'full' && _.scheduleId === scheduleId + ) + ): any) + + const deleteFirst = getSetting( + settings, + 'deleteFirst', + remoteId + ) + if (deleteFirst) { + await this._deleteFullVmBackups(handler, oldBackups) + } + + await wrapTask( + { + logger, + message: 'transfer', + parentId: taskId, + result: { + size: 0, + }, + }, + writeStream(fork, handler, dataFilename) + ) + + await handler.outputFile(metadataFilename, jsonMetadata) + + if (!deleteFirst) { + await this._deleteFullVmBackups(handler, oldBackups) + } + } + ) + ), + ...srs.map( + wrapTaskFn( + id => ({ + data: { id, type: 'SR' }, 
+ logger, + message: 'export', + parentId: taskId, + }), + async (taskId, srId) => { + const fork = forkExport() + + const xapi = app.getXapi(srId) + const sr = xapi.getObject(srId) + + const oldVms = getOldEntries( + exportRetention, + listReplicatedVms(xapi, scheduleId, srId, vmUuid) + ) + + const deleteFirst = getSetting(settings, 'deleteFirst', srId) + if (deleteFirst) { + await this._deleteVms(xapi, oldVms) + } + + const vm = await xapi.barrier( + await wrapTask( + { + logger, + message: 'transfer', + parentId: taskId, + result: { + size: 0, + }, + }, + xapi._importVm($cancelToken, fork, sr, vm => + xapi._setObjectProperties(vm, { + nameLabel: `${metadata.vm.name_label} (${safeDateFormat( + metadata.timestamp + )})`, + }) + ) + ) + ) + + await Promise.all([ + xapi.addTag(vm.$ref, 'Disaster Recovery'), + xapi._updateObjectMapProperty(vm, 'blocked_operations', { + start: + 'Start operation for this vm is blocked, clone it if you want to use it.', + }), + xapi._updateObjectMapProperty(vm, 'other_config', { + 'xo:backup:sr': srId, + }), + ]) + + if (!deleteFirst) { + await this._deleteVms(xapi, oldVms) + } + } + ) + ), ], - error => { - console.warn(error) - errors.push(error) - } + noop // errors are handled in logs ) - if (errors.length !== 0) { - throw errors - } - - return { - mergeDuration: 0, - mergeSize: 0, - transferDuration: Date.now() - now, - transferSize: xva.size, - } } else if (job.mode === 'delta') { if (snapshotRetention === 0) { // only keep the snapshot in case of success @@ -904,128 +1019,164 @@ export default class BackupNg { } })() - const mergeStart = 0 - const mergeEnd = 0 - let transferStart = 0 - let transferEnd = 0 - const errors = [] await waitAll( [ - ...remotes.map(async remoteId => { - const fork = forkExport() + ...remotes.map( + wrapTaskFn( + id => ({ + data: { id, type: 'remote' }, + logger, + message: 'export', + parentId: taskId, + }), + async (taskId, remoteId) => { + const fork = forkExport() - const handler = await 
app.getRemoteHandler(remoteId) + const handler = await app.getRemoteHandler(remoteId) - const oldBackups: MetadataDelta[] = (getOldEntries( - exportRetention, - await this._listVmBackups( - handler, - vm, - _ => _.mode === 'delta' && _.scheduleId === scheduleId - ) - ): any) + const oldBackups: MetadataDelta[] = (getOldEntries( + exportRetention, + await this._listVmBackups( + handler, + vm, + _ => _.mode === 'delta' && _.scheduleId === scheduleId + ) + ): any) + const deleteOldBackups = () => + wrapTask( + { + logger, + message: 'merge', + parentId: taskId, + result: { + size: 0, + }, + }, + this._deleteDeltaVmBackups(handler, oldBackups) + ) - const deleteFirst = - exportRetention > 1 && - getSetting(settings, 'deleteFirst', remoteId) - if (deleteFirst) { - await this._deleteDeltaVmBackups(handler, oldBackups) - } - - await asyncMap( - fork.vdis, - defer(async ($defer, vdi, id) => { - const path = `${vmDir}/${metadata.vhds[id]}` - - const isDelta = vdi.other_config['xo:base_delta'] !== undefined - let parentPath - if (isDelta) { - const vdiDir = dirname(path) - const parent = (await handler.list(vdiDir)) - .filter(isVhd) - .sort() - .pop() - parentPath = `${vdiDir}/${parent}` + const deleteFirst = + exportRetention > 1 && + getSetting(settings, 'deleteFirst', remoteId) + if (deleteFirst) { + await deleteOldBackups() } - await writeStream(fork.streams[`${id}.vhd`](), handler, path, { - // no checksum for VHDs, because they will be invalidated by - // merges and chainings - checksum: false, - }) - $defer.onFailure.call(handler, 'unlink', path) + await wrapTask( + { + logger, + message: 'transfer', + parentId: taskId, + result: { + size: 0, + }, + }, + asyncMap( + fork.vdis, + defer(async ($defer, vdi, id) => { + const path = `${vmDir}/${metadata.vhds[id]}` - if (isDelta) { - await chainVhd(handler, parentPath, handler, path) + const isDelta = + vdi.other_config['xo:base_delta'] !== undefined + let parentPath + if (isDelta) { + const vdiDir = dirname(path) + const 
parent = (await handler.list(vdiDir)) + .filter(isVhd) + .sort() + .pop() + parentPath = `${vdiDir}/${parent}` + } + + await writeStream( + fork.streams[`${id}.vhd`](), + handler, + path, + { + // no checksum for VHDs, because they will be invalidated by + // merges and chainings + checksum: false, + } + ) + $defer.onFailure.call(handler, 'unlink', path) + + if (isDelta) { + await chainVhd(handler, parentPath, handler, path) + } + }) + ) + ) + + await handler.outputFile(metadataFilename, jsonMetadata) + + if (!deleteFirst) { + await deleteOldBackups() } - }) + } ) + ), + ...srs.map( + wrapTaskFn( + id => ({ + data: { id, type: 'SR' }, + logger, + message: 'export', + parentId: taskId, + }), + async (taskId, srId) => { + const fork = forkExport() - await handler.outputFile(metadataFilename, jsonMetadata) + const xapi = app.getXapi(srId) + const sr = xapi.getObject(srId) - if (!deleteFirst) { - await this._deleteDeltaVmBackups(handler, oldBackups) - } - }), - ...srs.map(async srId => { - const fork = forkExport() + const oldVms = getOldEntries( + exportRetention, + listReplicatedVms(xapi, scheduleId, srId, vmUuid) + ) - const xapi = app.getXapi(srId) - const sr = xapi.getObject(srId) + const deleteFirst = getSetting(settings, 'deleteFirst', srId) + if (deleteFirst) { + await this._deleteVms(xapi, oldVms) + } - const oldVms = getOldEntries( - exportRetention, - listReplicatedVms(xapi, scheduleId, srId, vmUuid) + const { vm } = await wrapTask( + { + logger, + message: 'transfer', + parentId: taskId, + result: { + size: 0, + }, + }, + xapi.importDeltaVm(fork, { + disableStartAfterImport: false, // we'll take care of that + name_label: `${metadata.vm.name_label} (${safeDateFormat( + metadata.timestamp + )})`, + srId: sr.$id, + }) + ) + + await Promise.all([ + xapi.addTag(vm.$ref, 'Continuous Replication'), + xapi._updateObjectMapProperty(vm, 'blocked_operations', { + start: + 'Start operation for this vm is blocked, clone it if you want to use it.', + }), + 
xapi._updateObjectMapProperty(vm, 'other_config', { + 'xo:backup:sr': srId, + }), + ]) + + if (!deleteFirst) { + await this._deleteVms(xapi, oldVms) + } + } ) - - const deleteFirst = getSetting(settings, 'deleteFirst', srId) - if (deleteFirst) { - await this._deleteVms(xapi, oldVms) - } - - transferStart = Math.min(transferStart, Date.now()) - - const { vm } = await xapi.importDeltaVm(fork, { - disableStartAfterImport: false, // we'll take care of that - name_label: `${metadata.vm.name_label} (${safeDateFormat( - metadata.timestamp - )})`, - srId: sr.$id, - }) - - transferEnd = Math.max(transferEnd, Date.now()) - - await Promise.all([ - xapi.addTag(vm.$ref, 'Continuous Replication'), - xapi._updateObjectMapProperty(vm, 'blocked_operations', { - start: - 'Start operation for this vm is blocked, clone it if you want to use it.', - }), - xapi._updateObjectMapProperty(vm, 'other_config', { - 'xo:backup:sr': srId, - }), - ]) - - if (!deleteFirst) { - await this._deleteVms(xapi, oldVms) - } - }), + ), ], - error => { - console.warn(error) - errors.push(error) - } + noop // errors are handled in logs ) - if (errors.length !== 0) { - throw errors - } - - return { - mergeDuration: mergeEnd - mergeStart, - mergeSize: 0, - transferDuration: transferEnd - transferStart, - transferSize: 0, - } } else { throw new Error(`no exporter for backup mode ${job.mode}`) } @@ -1137,4 +1288,54 @@ export default class BackupNg { return backups.sort(compareTimestamp) } + + async getBackupNgLogs (runId?: string): Promise { + const rawLogs = await this._app.getLogs('jobs') + + const logs: $Dict = {} + forEach(rawLogs, (log, id) => { + const { data, time, message } = log + const { event } = data + delete data.event + + switch (event) { + case 'job.start': + if (data.type === 'backup' && (runId === undefined || runId === id)) { + logs[id] = { + ...data, + id, + start: time, + } + } + break + case 'job.end': + const job = logs[data.runJobId] + if (job !== undefined) { + job.end = time + 
job.duration = time - job.start + job.error = data.error + } + break + case 'task.start': + if (logs[data.parentId] !== undefined) { + logs[id] = { + ...data, + start: time, + message, + } + } + break + case 'task.end': + const task = logs[data.taskId] + if (task !== undefined) { + task.status = data.status + task.taskId = data.taskId + task.result = data.result + task.end = time + task.duration = time - task.start + } + } + }) + return groupBy(logs, log => log.parentId || 'roots') + } } diff --git a/packages/xo-server/src/xo-mixins/jobs/index.js b/packages/xo-server/src/xo-mixins/jobs/index.js index 91f0681d8..084b4388d 100644 --- a/packages/xo-server/src/xo-mixins/jobs/index.js +++ b/packages/xo-server/src/xo-mixins/jobs/index.js @@ -209,18 +209,32 @@ export default class Jobs { throw new Error(`job ${id} is already running`) } - const executor = this._executors[job.type] + const { type } = job + const executor = this._executors[type] if (executor === undefined) { - throw new Error(`cannot run job ${id}: no executor for type ${job.type}`) + throw new Error(`cannot run job ${id}: no executor for type ${type}`) + } + + let data + if (type === 'backup') { + // $FlowFixMe only defined for BackupJob + const settings = job.settings[''] + data = { + // $FlowFixMe only defined for BackupJob + mode: job.mode, + reportWhen: (settings && settings.reportWhen) || 'failure', + } } const logger = this._logger const runJobId = logger.notice(`Starting execution of ${id}.`, { + data, event: 'job.start', userId: job.userId, jobId: id, // $FlowFixMe only defined for CallJob key: job.key, + type, }) runningJobs[id] = runJobId @@ -231,7 +245,7 @@ export default class Jobs { session = app.createUserConnection() session.set('user_id', job.userId) - const status = await executor({ + await executor({ app, cancelToken, job, @@ -245,8 +259,7 @@ export default class Jobs { runJobId, }) - session.close() - app.emit('job:terminated', status) + app.emit('job:terminated', runJobId, job, 
schedule) } catch (error) { logger.error(`The execution of ${id} has failed.`, { event: 'job.end', diff --git a/packages/xo-server/src/xo-mixins/logs/index.js b/packages/xo-server/src/xo-mixins/logs/index.js index a10aeb8fb..917e1d8c9 100644 --- a/packages/xo-server/src/xo-mixins/logs/index.js +++ b/packages/xo-server/src/xo-mixins/logs/index.js @@ -32,11 +32,11 @@ export default class Logs { const onData = keep !== 0 ? () => { - if (--keep === 0) { - stream.on('data', deleteEntry) - stream.removeListener('data', onData) + if (--keep === 0) { + stream.on('data', deleteEntry) + stream.removeListener('data', onData) + } } - } : deleteEntry stream.on('data', onData) @@ -51,4 +51,22 @@ export default class Logs { .getStore('logs') .then(store => new LevelDbLogger(store, namespace)) } + + async getLogs (namespace) { + const logger = await this.getLogger(namespace) + + return new Promise((resolve, reject) => { + const logs = {} + + logger + .createReadStream() + .on('data', data => { + logs[data.key] = data.value + }) + .on('end', () => { + resolve(logs) + }) + .on('error', reject) + }) + } } diff --git a/packages/xo-web/src/common/intl/messages.js b/packages/xo-web/src/common/intl/messages.js index 879b10092..5f74be8c6 100644 --- a/packages/xo-web/src/common/intl/messages.js +++ b/packages/xo-web/src/common/intl/messages.js @@ -288,6 +288,23 @@ const messages = { jobFinished: 'Finished', jobInterrupted: 'Interrupted', jobStarted: 'Started', + jobFailed: 'Failed', + jobSkipped: 'Skipped', + jobSuccess: 'Successful', + allTasks: 'All', + taskStart: 'Start', + taskEnd: 'End', + taskDuration: 'Duration', + taskSuccess: 'Successful', + taskFailed: 'Failed', + taskSkipped: 'Skipped', + taskStarted: 'Started', + taskInterrupted: 'Interrupted', + taskTransferredDataSize: 'Transfer size', + taskTransferredDataSpeed: 'Transfer speed', + taskMergedDataSize: 'Merge size', + taskMergedDataSpeed: 'Merge speed', + taskError: 'Error', saveBackupJob: 'Save', deleteBackupSchedule: 
'Remove backup job', deleteBackupScheduleQuestion: @@ -326,6 +343,11 @@ const messages = { runBackupNgJobConfirm: 'Are you sure you want to run {name} ({id})?', // ------ New backup ----- + newBackupAdvancedSettings: 'Advanced settings', + reportWhenAlways: 'Always', + reportWhenFailure: 'Failure', + reportWhenNever: 'Never', + reportWhen: 'Report when', newBackupSelection: 'Select your backup type:', smartBackupModeSelection: 'Select backup mode:', normalBackup: 'Normal backup', @@ -1633,6 +1655,7 @@ const messages = { logParams: 'Params', logMessage: 'Message', logError: 'Error', + logTitle: 'Logs', logDisplayDetails: 'Display details', logTime: 'Date', logNoStackTrace: 'No stack trace', diff --git a/packages/xo-web/src/common/xo/index.js b/packages/xo-web/src/common/xo/index.js index e7ed112b1..41d3c4c67 100644 --- a/packages/xo-web/src/common/xo/index.js +++ b/packages/xo-web/src/common/xo/index.js @@ -1705,6 +1705,10 @@ export const subscribeBackupNgJobs = createSubscription(() => _call('backupNg.getAllJobs') ) +export const subscribeBackupNgLogs = createSubscription(() => + _call('backupNg.getAllLogs') +) + export const createBackupNgJob = props => _call('backupNg.createJob', props)::tap(subscribeBackupNgJobs.forceRefresh) diff --git a/packages/xo-web/src/xo-app/backup-ng/index.js b/packages/xo-web/src/xo-app/backup-ng/index.js index 95ba53b92..e0e148d78 100644 --- a/packages/xo-web/src/xo-app/backup-ng/index.js +++ b/packages/xo-web/src/xo-app/backup-ng/index.js @@ -21,7 +21,7 @@ import { subscribeSchedules, } from 'xo' -import LogsTable from '../logs' +import LogsTable from '../logs/backup-ng-logs' import Page from '../page' import Edit from './edit' diff --git a/packages/xo-web/src/xo-app/backup-ng/new/index.js b/packages/xo-web/src/xo-app/backup-ng/new/index.js index d2a82ef5a..8632c7118 100644 --- a/packages/xo-web/src/xo-app/backup-ng/new/index.js +++ b/packages/xo-web/src/xo-app/backup-ng/new/index.js @@ -3,6 +3,7 @@ import ActionButton from 
'action-button' import Icon from 'icon' import React from 'react' import renderXoItem, { renderXoItemFromId } from 'render-xo-item' +import Select from 'form/select' import Tooltip from 'tooltip' import Upgrade from 'xoa-upgrade' import { addSubscriptions, resolveId, resolveIds } from 'utils' @@ -12,9 +13,10 @@ import { find, findKey, flatten, - keyBy, + get, includes, isEmpty, + keyBy, map, some, } from 'lodash' @@ -89,6 +91,23 @@ const getNewSchedules = schedules => { return newSchedules } +const REPORT_WHEN_FILTER_OPTIONS = [ + { + label: 'reportWhenAlways', + value: 'always', + }, + { + label: 'reportWhenFailure', + value: 'failure', + }, + { + label: 'reportWhenNever', + value: 'Never', + }, +] + +const getOptionRenderer = ({ label }) => {_(label)} + const getInitialState = () => ({ $pool: {}, backupMode: false, @@ -103,6 +122,7 @@ const getInitialState = () => ({ paramsUpdated: false, powerState: 'All', remotes: [], + reportWhen: 'failure', schedules: [], settings: {}, smartMode: false, @@ -136,6 +156,9 @@ export default [ schedules: getNewSchedules(state.newSchedules), settings: { ...getNewSettings(state.newSchedules), + '': { + reportWhen: state.reportWhen, + }, }, remotes: state.deltaMode || state.backupMode @@ -195,11 +218,16 @@ export default [ const oldSettings = props.job.settings const settings = state.settings + if (!('' in oldSettings)) { + oldSettings[''] = {} + } for (const id in oldSettings) { const oldSetting = oldSettings[id] const newSetting = settings[id] - if (!(id in settings)) { + if (id === '') { + oldSetting.reportWhen = state.reportWhen + } else if (!(id in settings)) { delete oldSettings[id] } else if ( oldSetting.snapshotRetention !== newSetting.snapshotRetention || @@ -281,6 +309,9 @@ export default [ const remotes = job.remotes !== undefined ? destructPattern(job.remotes) : [] const srs = job.srs !== undefined ? 
destructPattern(job.srs) : [] + const globalSettings = job.settings[''] + const settings = { ...job.settings } + delete settings[''] return { ...state, @@ -298,7 +329,8 @@ export default [ crMode: job.mode === 'delta' && !isEmpty(srs), remotes, srs, - settings: job.settings, + reportWhen: get(globalSettings, 'reportWhen') || 'failure', + settings, schedules, ...destructVmsPattern(job.vms), } @@ -455,6 +487,10 @@ export default [ return getInitialState() }, + setReportWhen: (_, { value }) => state => ({ + ...state, + reportWhen: value, + }), }, computed: { needUpdateParams: (state, { job, schedules }) => @@ -698,6 +734,25 @@ export default [ )} + + {_('newBackupAdvancedSettings')} + + + + taskLog, + isJobRunning => isJobRunning, + ({ end, status, result }, isJobRunning) => + end !== undefined + ? status === 'success' + ? 'success' + : result !== undefined && isSkippedError(result) ? 'skipped' : 'failure' + : isJobRunning ? 'started' : 'interrupted' +) + +const getSubTaskStatus = createSelector( + taskLog => taskLog, + isJobRunning => isJobRunning, + ({ end, status, result }, isJobRunning) => + end !== undefined + ? status === 'success' ? 'success' : 'failure' + : isJobRunning ? 
'started' : 'interrupted' +) + +const TASK_STATUS = { + failure: { + icon: 'halted', + label: 'taskFailed', + }, + skipped: { + icon: 'skipped', + label: 'taskSkipped', + }, + success: { + icon: 'running', + label: 'taskSuccess', + }, + started: { + icon: 'busy', + label: 'taskStarted', + }, + interrupted: { + icon: 'halted', + label: 'taskInterrupted', + }, +} + +const TaskStateInfos = ({ status }) => { + const { icon, label } = TASK_STATUS[status] + return ( + + + + ) +} + +const VmTaskDataInfos = ({ logs, vmTaskId }) => { + let transferSize, transferDuration, mergeSize, mergeDuration + forEach(logs[vmTaskId], ({ taskId }) => { + if (transferSize !== undefined) { + return false + } + + const transferTask = find(logs[taskId], { message: 'transfer' }) + if (transferTask !== undefined) { + transferSize = transferTask.result.size + transferDuration = transferTask.end - transferTask.start + } + + const mergeTask = find(logs[taskId], { message: 'merge' }) + if (mergeTask !== undefined) { + mergeSize = mergeTask.result.size + mergeDuration = mergeTask.end - mergeTask.start + } + }) + + if (transferSize === undefined) { + return null + } + + return ( +
+ {_.keyValue(_('taskTransferredDataSize'), formatSize(transferSize))} +
+ {_.keyValue( + _('taskTransferredDataSpeed'), + formatSpeed(transferSize, transferDuration) + )} + {mergeSize !== undefined && ( +
+ {_.keyValue(_('taskMergedDataSize'), formatSize(mergeSize))} +
+ {_.keyValue( + _('taskMergedDataSpeed'), + formatSpeed(mergeSize, mergeDuration) + )} +
+ )} +
+ ) +} + +const UNHEALTHY_VDI_CHAIN_LINK = + 'https://xen-orchestra.com/docs/backup_troubleshooting.html#vdi-chain-protection' + +const ALL_FILTER_OPTION = { label: 'allTasks', value: 'all' } +const FAILURE_FILTER_OPTION = { label: 'taskFailed', value: 'failure' } +const STARTED_FILTER_OPTION = { label: 'taskStarted', value: 'started' } +const TASK_FILTER_OPTIONS = [ + ALL_FILTER_OPTION, + FAILURE_FILTER_OPTION, + STARTED_FILTER_OPTION, + { label: 'taskInterrupted', value: 'interrupted' }, + { label: 'taskSkipped', value: 'skipped' }, + { label: 'taskSuccess', value: 'success' }, +] + +const getFilteredTaskLogs = (logs, isJobRunning, filterValue) => + filterValue === 'all' + ? logs + : filter(logs, log => getTaskStatus(log, isJobRunning) === filterValue) + +const getInitialFilter = (job, logs, log) => { + const isEmptyFilter = filterValue => + isEmpty( + getFilteredTaskLogs( + logs[log.id], + get(job, 'runId') === log.id, + filterValue + ) + ) + + if (!isEmptyFilter('started')) { + return STARTED_FILTER_OPTION + } + + if (!isEmptyFilter('failure')) { + return FAILURE_FILTER_OPTION + } + + return ALL_FILTER_OPTION +} + +export default [ + addSubscriptions({ + remotes: cb => + subscribeRemotes(remotes => { + cb(keyBy(remotes, 'id')) + }), + }), + provideState({ + initialState: ({ job, logs, log }) => ({ + filter: getInitialFilter(job, logs, log), + }), + effects: { + setFilter: (_, filter) => state => ({ + ...state, + filter, + }), + }, + computed: { + isJobRunning: (_, { job, log }) => get(job, 'runId') === log.id, + filteredTaskLogs: ({ filter: { value }, isJobRunning }, { log, logs }) => + getFilteredTaskLogs(logs[log.id], isJobRunning, value), + optionRenderer: ({ isJobRunning }, { log, logs }) => ({ + label, + value, + }) => ( + + {_(label)} ({ + getFilteredTaskLogs(logs[log.id], isJobRunning, value).length + }) + + ), + }, + }), + injectState, + ({ job, log, logs, remotes, state, effects }) => + log.error !== undefined ? ( + + + {log.error.message} + + + ) : ( +
+