Compare commits
8 Commits
vhd_hashes ... fix_backup
| Author | SHA1 | Date |
|---|---|---|
|  | 6fdf0a97e4 |  |
|  | 3a4bdd3b1e |  |
|  | 1023131828 |  |
|  | e2d83324ac |  |
|  | 7cea445c21 |  |
|  | b5d9d9a9e1 |  |
|  | 3a4e9b8f8e |  |
|  | 92efd28b33 |  |
@@ -26,16 +26,7 @@ export const IncrementalXapi = class IncrementalXapiVmBackupRunner extends Abstr
   }
 
   _mustDoSnapshot() {
-    const vm = this._vm
-
-    const settings = this._settings
-    return (
-      settings.unconditionalSnapshot ||
-      (!settings.offlineBackup && vm.power_state === 'Running') ||
-      settings.snapshotRetention !== 0 ||
-      settings.fullInterval !== 1 ||
-      settings.deltaComputationMode === 'AGAINST_PARENT_VHD'
-    )
+    return true
   }
 
   async _copy() {
@@ -3,11 +3,11 @@ import mapValues from 'lodash/mapValues.js'
 import ignoreErrors from 'promise-toolbox/ignoreErrors'
 import { asyncEach } from '@vates/async-each'
 import { asyncMap } from '@xen-orchestra/async-map'
-import { chainVhd, checkVhdChain, openVhd, VhdAbstract, VhdSynthetic } from 'vhd-lib'
+import { chainVhd, checkVhdChain, openVhd, VhdAbstract } from 'vhd-lib'
 import { createLogger } from '@xen-orchestra/log'
 import { decorateClass } from '@vates/decorate-with'
 import { defer } from 'golike-defer'
-import { dirname } from 'node:path'
+import { dirname, basename as pathBasename } from 'node:path'
 
 import { formatFilenameDate } from '../../_filenameDate.mjs'
 import { getOldEntries } from '../../_getOldEntries.mjs'
@@ -183,7 +183,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
 
     const isDifferencing = isVhdDifferencing[`${id}.vhd`]
     let parentPath
-    let parentVhd
     if (isDifferencing) {
      const vdiDir = dirname(path)
      parentPath = (
@@ -200,16 +199,14 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
        undefined,
        `missing parent of ${id} in ${dirname(path)}, looking for ${vdi.other_config[TAG_BASE_DELTA]}`
      )
-
+      assert.ok(
+        pathBasename(parentPath) < pathBasename(path),
+        `vhd must be sorted to be chained`
+      )
      parentPath = parentPath.slice(1) // remove leading slash
 
      // TODO remove when this has been done before the export
      await checkVhd(handler, parentPath)
-      if(settings.deltaComputationMode === 'AGAINST_PARENT_VHD'){
-        const {dispose, value } = await VhdSynthetic.fromVhdChain(handler, parentPath)
-        parentVhd = value
-        $defer(()=>dispose())
-      }
     }
 
     // don't write it as transferSize += await async function
@@ -219,7 +216,6 @@ export class IncrementalRemoteWriter extends MixinRemoteWriter(AbstractIncrement
      // no checksum for VHDs, because they will be invalidated by
      // merges and chainings
      checksum: false,
-      parentVhd,
      validator: tmpPath => checkVhd(handler, tmpPath),
      writeBlockConcurrency: this._config.writeBlockConcurrency,
    })
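The `assert.ok` added here leans on the fact that backup filenames sort chronologically: `formatFilenameDate` (imported above) produces timestamp-based names, so a plain string comparison of the basenames is enough to verify that the parent VHD predates the child before chaining. A minimal sketch of that property, with hypothetical paths, assuming ISO-style `YYYYMMDDTHHmmssZ` names:

```js
const { basename } = require('node:path')

// Timestamp-based filenames compare lexicographically in chronological order,
// so an older (parent) VHD must have a basename that sorts before the child's.
const parentPath = 'vdis/job/vdi-uuid/20240101T120000Z.vhd' // hypothetical
const childPath = 'vdis/job/vdi-uuid/20240102T090000Z.vhd' // hypothetical

console.log(basename(parentPath) < basename(childPath)) // true → safe to chain
```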
@@ -13,6 +13,10 @@
 
 > Users must be able to say: “I had this issue, happy to know it's fixed”
 
+- [Settings/XO Config] Sort backups from newest to oldest
+- [Plugins/audit] Don't log `tag.getAllConfigured` calls
+- [Remotes] Correctly clear error when the remote is tested with success
+
 ### Packages to release
 
 > When modifying a package, add it here with its release type.
@@ -29,6 +33,9 @@
 
 <!--packages-start-->
 
-
+- @xen-orchestra/backups patch
+- xo-server patch
+- xo-server-audit patch
+- xo-web patch
 
 <!--packages-end-->
@@ -1,4 +1,6 @@
 {
+  "name": "xen-orchestra",
+  "version": "0.0.0",
   "devDependencies": {
     "@babel/core": "^7.0.0",
     "@babel/eslint-parser": "^7.13.8",
@@ -94,7 +96,7 @@
   },
   "private": true,
   "scripts": {
-    "build": "turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
+    "build": "TURBO_TELEMETRY_DISABLED=1 turbo run build --scope xo-server --scope xo-server-'*' --scope xo-web",
     "build:xo-lite": "turbo run build --scope @xen-orchestra/lite",
     "clean": "scripts/run-script.js --parallel clean",
     "dev": "scripts/run-script.js --parallel --concurrency 0 --verbose dev",
@@ -84,9 +84,6 @@ exports.VhdAbstract = class VhdAbstract {
   readBlockAllocationTable() {
     throw new Error(`reading block allocation table is not implemented`)
   }
-  readBlockHashes() {
-    throw new Error(`reading block hashes table is not implemented`)
-  }
 
   /**
    * @typedef {Object} BitmapBlock
@@ -107,10 +104,6 @@ exports.VhdAbstract = class VhdAbstract {
     throw new Error(`reading ${onlyBitmap ? 'bitmap of block' : 'block'} ${blockId} is not implemented`)
   }
 
-  getBlockHash(blockId){
-    throw new Error(`reading block hash ${blockId} is not implemented`)
-  }
-
   /**
    * coalesce the block with id blockId from the child vhd into
    * this vhd
@@ -4,7 +4,6 @@ const { unpackHeader, unpackFooter, sectorsToBytes } = require('./_utils')
 const { createLogger } = require('@xen-orchestra/log')
 const { fuFooter, fuHeader, checksumStruct } = require('../_structs')
 const { test, set: setBitmap } = require('../_bitmap')
-const { hashBlock } = require('../hashBlock')
 const { VhdAbstract } = require('./VhdAbstract')
 const assert = require('assert')
 const { synchronized } = require('decorator-synchronized')
@@ -76,7 +75,6 @@ function getCompressor(compressorType) {
 
 exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
   #uncheckedBlockTable
-  #blockHashes
   #header
   footer
   #compressor
@@ -142,17 +140,6 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
     this.#blockTable = buffer
   }
 
-  async readBlockHashes() {
-    try {
-      const { buffer } = await this._readChunk('hashes')
-      this.#blockHashes = JSON.parse(buffer)
-    } catch (err) {
-      if (err.code !== 'ENOENT') {
-        throw err
-      }
-    }
-  }
-
   containsBlock(blockId) {
     return test(this.#blockTable, blockId)
   }
@@ -190,11 +177,6 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
     const blockSuffix = blockId - blockPrefix * 1e3
     return `blocks/${blockPrefix}/${blockSuffix}`
   }
-  getBlockHash(blockId) {
-    if (this.#blockHashes !== undefined) {
-      return this.#blockHashes[blockId]
-    }
-  }
 
   _getFullBlockPath(blockId) {
     return this.#getChunkPath(this.#getBlockPath(blockId))
@@ -227,10 +209,6 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
       throw new Error(`reading 'bitmap of block' ${blockId} in a VhdDirectory is not implemented`)
     }
     const { buffer } = await this._readChunk(this.#getBlockPath(blockId))
-    const hash = this.getBlockHash(blockId)
-    if (hash) {
-      assert.strictEqual(hash, hash(buffer))
-    }
     return {
       id: blockId,
       bitmap: buffer.slice(0, this.bitmapSize),
@@ -266,7 +244,7 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
     assert.notStrictEqual(this.#blockTable, undefined, 'Block allocation table has not been read')
     assert.notStrictEqual(this.#blockTable.length, 0, 'Block allocation table is empty')
 
-    return Promise.all([this._writeChunk('bat', this.#blockTable), this._writeChunk('hashes', this.#blockHashes)])
+    return this._writeChunk('bat', this.#blockTable)
   }
 
   // only works if data are in the same handler
@@ -287,11 +265,8 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
       await this._handler.rename(childBlockPath, this._getFullBlockPath(blockId))
       if (!blockExists) {
        setBitmap(this.#blockTable, blockId)
-        this.#blockHashes[blockId] = child.getBlockHash(blockId)
        await this.writeBlockAllocationTable()
      }
-      // @todo block hashes changs may be lost if the vhd merging fail
-      // should migrate to writing bat from time to time, sync with the metadata
    } catch (error) {
      if (error.code === 'ENOENT' && isResumingMerge === true) {
        // when resuming, the blocks moved since the last merge state write are
@@ -312,7 +287,6 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
   async writeEntireBlock(block) {
     await this._writeChunk(this.#getBlockPath(block.id), block.buffer)
     setBitmap(this.#blockTable, block.id)
-    this.#blockHashes[block.id] = hashBlock(block.buffer)
   }
 
   async _readParentLocatorData(id) {
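For context on what the abandoned branch stored: per the removed code above, a `VhdDirectory` kept an optional `hashes` chunk beside the `bat` chunk, a JSON map from block id to SHA-256 digest, loaded by `readBlockHashes()` and consulted by `getBlockHash()`. A standalone sketch of that read path, with the handler reduced to plain file access (an assumption, the real `_readChunk` goes through a remote handler):

```js
const { readFile } = require('node:fs/promises')

// Sketch of the removed readBlockHashes(): load the optional 'hashes' sidecar;
// a missing file just means no hashes were recorded for this VHD.
async function readBlockHashes(vhdDirectoryPath) {
  try {
    return JSON.parse(await readFile(`${vhdDirectoryPath}/hashes`))
  } catch (err) {
    if (err.code !== 'ENOENT') {
      throw err
    }
    return undefined // getBlockHash() then returns undefined for every block
  }
}
```

Note that the verification removed from `readBlock` compared `hash` with `hash(buffer)`: `hash` is the stored digest string, so calling it would throw; it presumably meant `hashBlock(buffer)`.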
@@ -96,10 +96,6 @@ const VhdSynthetic = class VhdSynthetic extends VhdAbstract {
     assert(false, `no such block ${blockId}`)
   }
 
-  async getBlockHash(blockId){
-    return this.#getVhdWithBlock(blockId).getBlockHash(blockId)
-  }
-
   async readBlock(blockId, onlyBitmap = false) {
     // only read the content of the first vhd containing this block
     return await this.#getVhdWithBlock(blockId).readBlock(blockId, onlyBitmap)
@@ -1,7 +1,6 @@
 'use strict'
 
 const { createLogger } = require('@xen-orchestra/log')
-const { hashBlock } = require('./hashBlock.js')
 const { parseVhdStream } = require('./parseVhdStream.js')
 const { VhdDirectory } = require('./Vhd/VhdDirectory.js')
 const { Disposable } = require('promise-toolbox')
@@ -9,7 +8,7 @@ const { asyncEach } = require('@vates/async-each')
 
 const { warn } = createLogger('vhd-lib:createVhdDirectoryFromStream')
 
-const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression, parentVhd }) {
+const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, { concurrency, compression }) {
   const vhd = yield VhdDirectory.create(handler, path, { compression })
   await asyncEach(
     parseVhdStream(inputStream),
@@ -25,10 +24,6 @@ const buildVhd = Disposable.wrap(async function* (handler, path, inputStream, {
        await vhd.writeParentLocator({ ...item, data: item.buffer })
        break
      case 'block':
-        if (parentVhd !== undefined && hashBlock(item.buffer) === parentVhd.getBlockHash(item.id)) {
-          // already in parent
-          return
-        }
        await vhd.writeEntireBlock(item)
        break
      case 'bat':
@@ -50,10 +45,10 @@ exports.createVhdDirectoryFromStream = async function createVhdDirectoryFromStre
   handler,
   path,
   inputStream,
-  { validator, concurrency = 16, compression, parentVhd } = {}
+  { validator, concurrency = 16, compression } = {}
 ) {
   try {
-    const size = await buildVhd(handler, path, inputStream, { concurrency, compression, parentVhd })
+    const size = await buildVhd(handler, path, inputStream, { concurrency, compression })
     if (validator !== undefined) {
       await validator.call(this, path)
     }
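Read together with the `IncrementalRemoteWriter` hunk above, the removed `parentVhd` option implemented block-level dedup against the parent chain: the writer opened the parent as a `VhdSynthetic`, and `buildVhd` skipped any incoming block whose hash matched the one the parent recorded for the same block id. The core of that check, as a hedged standalone sketch:

```js
const { createHash } = require('node:crypto')

// Same digest as the deleted hashBlock.js below: sha256 of the raw block.
const hashBlock = buffer => createHash('sha256').update(buffer).digest('hex')

// A block is written only when the parent has no matching hash for its id.
function shouldWriteBlock(item, parentVhd) {
  // parentVhd was only set when deltaComputationMode === 'AGAINST_PARENT_VHD'
  return parentVhd === undefined || hashBlock(item.buffer) !== parentVhd.getBlockHash(item.id)
}
```

One caveat visible in the diff itself: the removed comparison does not `await` `parentVhd.getBlockHash(...)`, yet `VhdSynthetic#getBlockHash` above is declared `async`, so with a synthetic parent the equality could never hold and the dedup would silently never trigger.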
@@ -1,12 +0,0 @@
-'use strict'
-
-const { createHash } = require('node:crypto')
-
-// using xxhash as for xva would make smaller hash and the collision risk would be low for the dedup,
-// since we have a tuple(index, hash), but it would be notable if
-// we implement dedup on top of this later
-// at most, a 2TB full vhd will use 32MB for its hashes
-// and this file is compressed with vhd block
-exports.hashBlock = function (buffer) {
-  return createHash('sha256').update(buffer).digest('hex')
-}
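The size estimate in the deleted comment holds for raw digests: with the default 2 MiB dynamic-VHD block size, a full 2 TiB disk has 1,048,576 blocks, and at 32 bytes per SHA-256 digest that is exactly 32 MiB. Since the digests were actually stored as 64-character hex strings in JSON, the uncompressed figure would be roughly double. A quick check:

```js
// "at most, a 2TB full vhd will use 32MB for its hashes" — rough check,
// assuming the default 2 MiB dynamic-VHD block size.
const blocks = (2 * 1024 ** 4) / (2 * 1024 ** 2) // 1,048,576 blocks
console.log((blocks * 32) / 1024 ** 2) // 32 MiB as raw 32-byte digests
console.log((blocks * 64) / 1024 ** 2) // 64 MiB as hex strings, before JSON overhead
```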
@@ -72,6 +72,7 @@ const DEFAULT_BLOCKED_LIST = {
   'system.getServerTimezone': true,
   'system.getServerVersion': true,
   'system.getVersion': true,
+  'tag.getAllConfigured': true,
   'test.getPermissionsForUser': true,
   'user.getAll': true,
   'user.getAuthenticationTokens': true,
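This is the change behind the `[Plugins/audit] Don't log tag.getAllConfigured calls` entry in the changelog above: methods listed in `DEFAULT_BLOCKED_LIST` are excluded from audit records, which keeps high-frequency UI polling calls from flooding the log. A minimal sketch of that kind of gate (the plugin's actual recording code is not part of this diff, so the surrounding names are hypothetical):

```js
const DEFAULT_BLOCKED_LIST = {
  'system.getVersion': true,
  'tag.getAllConfigured': true, // newly blocked: polled frequently, not worth auditing
}

// Hypothetical gate: only methods absent from the blocked list get recorded.
const shouldRecord = (method, blockedList = DEFAULT_BLOCKED_LIST) => blockedList[method] !== true

console.log(shouldRecord('tag.getAllConfigured')) // false → call is not logged
console.log(shouldRecord('vm.start')) // true → call is logged
```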
@@ -1099,7 +1099,9 @@ export const SelectXoCloudConfig = makeSubscriptionSelect(
   subscriber =>
     subscribeCloudXoConfigBackups(configs => {
       const xoObjects = groupBy(
-        map(configs, config => ({ ...config, type: 'xoConfig' })),
+        map(configs, config => ({ ...config, type: 'xoConfig' }))
+          // from newest to oldest
+          .sort((a, b) => b.createdAt - a.createdAt),
         'xoaId'
       )
       subscriber({
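The added comparator sorts numerically on `createdAt` in descending order: `b.createdAt - a.createdAt` is positive when `b` is newer, which moves newer configs to the front, matching the `// from newest to oldest` comment. For example:

```js
const configs = [{ createdAt: 1 }, { createdAt: 3 }, { createdAt: 2 }] // hypothetical timestamps
configs.sort((a, b) => b.createdAt - a.createdAt)
console.log(configs.map(c => c.createdAt)) // [ 3, 2, 1 ] → newest first
```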
@@ -5,10 +5,9 @@ import decorate from 'apply-decorators'
 import Icon from 'icon'
 import React from 'react'
 import { confirm } from 'modal'
-import { getApiApplianceInfo, subscribeCloudXoConfig, subscribeCloudXoConfigBackups } from 'xo'
 import { groupBy, sortBy } from 'lodash'
 import { injectState, provideState } from 'reaclette'
 import { SelectXoCloudConfig } from 'select-objects'
+import { subscribeCloudXoConfig, subscribeCloudXoConfigBackups } from 'xo'
 
 import BackupXoConfigModal from './backup-xo-config-modal'
 import RestoreXoConfigModal from './restore-xo-config-modal'
@@ -88,15 +87,7 @@ const CloudConfig = decorate([
      },
    },
    computed: {
-      applianceId: async () => {
-        const { id } = await getApiApplianceInfo()
-        return id
-      },
-      groupedConfigs: ({ applianceId, sortedConfigs }) =>
-        sortBy(groupBy(sortedConfigs, 'xoaId'), config => (config[0].xoaId === applianceId ? -1 : 1)),
      isConfigDefined: ({ config }) => config != null,
-      sortedConfigs: (_, { cloudXoConfigBackups }) =>
-        cloudXoConfigBackups?.sort((config, nextConfig) => config.createdAt - nextConfig.createdAt),
    },
  }),
  injectState,
@@ -33,7 +33,7 @@ const formatError = error => (typeof error === 'string' ? error : JSON.stringify
 
 const _changeUrlElement = (value, { remote, element }) =>
   editRemote(remote, {
-    url: format({ ...remote, [element]: value === null ? undefined : value }),
+    url: format({ ...parse(remote.url), [element]: value === null ? undefined : value }),
   })
 const _showError = remote => alert(_('remoteConnectionFailed'), <pre>{formatError(remote.error)}</pre>)
 const _editRemoteName = (name, { remote }) => editRemote(remote, { name })
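The corrected line re-parses the stored URL before overriding one element: `remote.url` is an encoded string, so spreading the whole `remote` record into `format()` fed it unrelated fields (`name`, `error`, …) rather than the URL's components, and could rebuild a broken URL. A sketch of the intended round-trip, assuming `parse`/`format` are the helpers from `xo-remote-parser` and using a hypothetical NFS remote:

```js
import { format, parse } from 'xo-remote-parser'

const remote = { name: 'my-nfs', url: 'nfs://192.168.1.10:/exports/backups' } // hypothetical
const element = 'host'
const value = '192.168.1.20'

// Parse → override a single element → re-serialize; the other URL parts survive.
const url = format({ ...parse(remote.url), [element]: value === null ? undefined : value })
console.log(url) // the same URL with the updated host
```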