Compare commits

...

23 Commits

Author SHA1 Message Date
Julien Fontanet
58954b5dbc feat(xo-server): 5.71.0-0 2020-11-03 17:27:34 +01:00
Julien Fontanet
c0470140f1 feat(vhd-lib): 0.9.0-0 2020-11-03 17:27:25 +01:00
Julien Fontanet
b34b02e4b0 feat(@xen-orchestra/fs): 0.12.0-0 2020-11-03 17:26:32 +01:00
Nicolas Raynaud
72eb7aed3f update changelog 2020-10-30 17:40:46 +01:00
Nicolas Raynaud
ecacc3a9e5 add changelog entry 2020-10-30 17:09:52 +01:00
Nicolas Raynaud
9ce6a6eb09 add changelog entry 2020-10-30 16:42:23 +01:00
Nicolas Raynaud
2f55ee9028 Merge branch 'master' into nr-copy-file-range-merge 2020-10-30 15:41:17 +01:00
Nicolas Raynaud
d26c093fe1 Merge branch 'master' into nr-copy-file-range-merge 2020-10-30 11:46:34 +01:00
Nicolas Raynaud
c953f34b01 Merge branch 'master' into nr-copy-file-range-merge 2020-10-29 13:46:25 +01:00
Nicolas Raynaud
69267d0d04 remove writeBlankRange() (keeping it for another branch) 2020-10-29 03:16:47 +01:00
Nicolas Raynaud
3dee6f4247 some cleanup 2020-10-29 00:57:26 +01:00
Nicolas Raynaud
4b715d7d96 some cleanup 2020-10-29 00:52:55 +01:00
Nicolas Raynaud
f3088dbafd make fAllocateSyscall() optional 2020-10-29 00:44:58 +01:00
Nicolas Raynaud
6bafdf3827 add console.log 2020-10-23 13:32:51 +02:00
Nicolas Raynaud
663d6b4607 add console.log 2020-10-23 01:04:20 +02:00
Nicolas Raynaud
eeb8049ff5 Merge branch 'master' into nr-copy-file-range-merge 2020-10-22 22:24:59 +02:00
Nicolas Raynaud
898d787659 try to get the tests running. 2020-10-22 22:15:47 +02:00
Nicolas Raynaud
57c320eaf6 start wiring copyFileRange() in the merge function 2020-10-22 16:35:09 +02:00
Nicolas Raynaud
64ec631b21 added writeBlankRange() and fSync() 2020-10-17 06:44:33 +02:00
Nicolas Raynaud
79626a3e38 fix s3.write() to work on new files 2020-10-17 06:42:39 +02:00
Nicolas Raynaud
b10c5ca6e8 added the copy_file_range loop, tested NFS 2020-10-17 02:24:16 +02:00
Nicolas Raynaud
9beb9c3ac5 begin introducing copyFileRange() method to remotes. 2020-10-16 13:30:58 +02:00
Nicolas Raynaud
d2b06f3ee7 try to use copy_file_range for VHD merge 2020-10-15 04:38:17 +02:00
17 changed files with 209 additions and 59 deletions

View File

@@ -7,14 +7,14 @@
"bugs": "https://github.com/vatesfr/xen-orchestra/issues",
"dependencies": {
"@xen-orchestra/backups": "^0.1.1",
"@xen-orchestra/fs": "^0.11.1",
"@xen-orchestra/fs": "^0.12.0-0",
"filenamify": "^4.1.0",
"getopts": "^2.2.5",
"limit-concurrency-decorator": "^0.4.0",
"lodash": "^4.17.15",
"promise-toolbox": "^0.15.0",
"proper-lockfile": "^4.1.1",
"vhd-lib": "^0.8.0"
"vhd-lib": "^0.9.0-0"
},
"engines": {
"node": ">=7.10.1"

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "@xen-orchestra/fs",
"version": "0.11.1",
"version": "0.12.0-0",
"license": "AGPL-3.0-or-later",
"description": "The File System for Xen Orchestra backups.",
"keywords": [],
@@ -36,6 +36,7 @@
"readable-stream": "^3.0.6",
"through2": "^4.0.2",
"tmp": "^0.2.1",
"syscall": "^0.2.0",
"xo-remote-parser": "^0.6.0"
},
"devDependencies": {

View File

@@ -311,6 +311,28 @@ export default class RemoteHandlerAbstract {
)
}
/**
* Copy a range from one file to another, kernel-side, server-side or with a reflink when possible.
*
* Slightly different from the copy_file_range Linux system call:
* - offsets are mandatory (because some remote handlers don't have a current file pointer)
* - flags is fixed to 0
* - does not return until the copy is finished.
*
* @param fdIn file descriptor open for reading
* @param offsetIn start offset in the source file
* @param fdOut file descriptor open for writing (not appending!)
* @param offsetOut offset in the target file
* @param dataLen number of bytes to copy
* @returns {Promise<void>}
*/
async copyFileRange(fdIn, offsetIn, fdOut, offsetOut, dataLen) {
// default implementation goes through the network
const buffer = Buffer.alloc(dataLen)
await this._read(fdIn, buffer, offsetIn)
await this._write(fdOut, buffer, offsetOut)
}
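
For reference, a minimal usage sketch of the new copyFileRange() helper (editorial illustration, not part of the diff; the handler instance, file names and length are assumed): copy a byte range between two files already open on the same remote handler.

// Hypothetical example: copy the first 2 MiB of one file into another
// through a remote handler. Falls back to the buffered read/write above
// when the handler has no faster implementation.
const length = 2 * 1024 * 1024
const fdIn = await handler.openFile('source.vhd', 'r')
const fdOut = await handler.openFile('target.vhd', 'r+')
try {
  await handler.copyFileRange(fdIn, 0, fdOut, 0, length)
} finally {
  await handler.closeFile(fdIn)
  await handler.closeFile(fdOut)
}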
async readFile(
file: string,
{ flags = 'r' }: { flags?: string } = {}
@@ -357,27 +379,51 @@ export default class RemoteHandlerAbstract {
}
async test(): Promise<Object> {
const SIZE = 1024 * 1024 * 10
const testFileName = normalizePath(`${Date.now()}.test`)
const data = await fromCallback(randomBytes, SIZE)
const SIZE = 1024 * 1024 * 100
const now = Date.now()
const testFileName = normalizePath(`${now}.test`)
const testFileName2 = normalizePath(`${now}__dup.test`)
// use random ASCII data to make debugging easier
const data = Buffer.from((await fromCallback(randomBytes, SIZE)).toString('base64'), 'ascii').slice(0, SIZE)
let step = 'write'
try {
const writeStart = process.hrtime()
await this._outputFile(testFileName, data, { flags: 'wx' })
const writeDuration = process.hrtime(writeStart)
let cloneDuration
const fd1 = await this.openFile(testFileName, 'r+')
try {
const fd2 = await this.openFile(testFileName2, 'wx')
try {
step = 'duplicate'
const cloneStart = process.hrtime()
await this.copyFileRange(fd1, 0, fd2, 0, data.byteLength)
cloneDuration = process.hrtime(cloneStart)
console.log('cloneDuration', cloneDuration)
} finally {
await this._closeFile(fd2)
}
} finally {
await this._closeFile(fd1)
}
step = 'read'
const readStart = process.hrtime()
const read = await this._readFile(testFileName, { flags: 'r' })
const readDuration = process.hrtime(readStart)
if (!data.equals(read)) {
throw new Error('output and input did not match')
}
const read2 = await this._readFile(testFileName2, { flags: 'r' })
if (!data.equals(read2)) {
throw new Error('duplicated and input did not match')
}
return {
success: true,
writeRate: computeRate(writeDuration, SIZE),
readRate: computeRate(readDuration, SIZE),
cloneDuration: computeRate(cloneDuration, SIZE),
}
} catch (error) {
return {
@@ -388,6 +434,7 @@ export default class RemoteHandlerAbstract {
}
} finally {
ignoreErrors.call(this._unlink(testFileName))
ignoreErrors.call(this._unlink(testFileName2))
}
}
@@ -428,7 +475,7 @@ export default class RemoteHandlerAbstract {
// Methods that can be called by private methods to avoid parallel limit on public methods
async __closeFile(fd: FileDescriptor): Promise<void> {
await timeout.call(this._closeFile(fd.fd), this._timeout)
await timeout.call(this._closeFile(fd), this._timeout)
}
async __mkdir(dir: string): Promise<void> {

View File

@@ -1,10 +1,39 @@
import df from '@sindresorhus/df'
import fs from 'fs-extra'
import { fromEvent } from 'promise-toolbox'
import { Syscall6 } from 'syscall'
import RemoteHandlerAbstract from './abstract'
/**
* @returns the number of bytes effectively copied; the copy may be partial, so call it in a loop
* @throws Error if the syscall returns -1
*/
function copyFileRangeSyscall(fdIn, offsetIn, fdOut, offsetOut, dataLen, flags = 0) {
// limited to Linux x86_64 because of the int64 offset representation and the hard-coded syscall number
function wrapOffset(offsetIn) {
if (offsetIn == null)
return 0
const offsetInBuffer = new Uint32Array(2)
new DataView(offsetInBuffer.buffer).setBigUint64(0, BigInt(offsetIn), true)
return offsetInBuffer
}
// https://man7.org/linux/man-pages/man2/copy_file_range.2.html
const SYS_copy_file_range = 326
const [copied, _, errno] = Syscall6(SYS_copy_file_range, fdIn, wrapOffset(offsetIn), fdOut, wrapOffset(offsetOut), dataLen, flags)
if (copied === -1) {
throw new Error('Error no ' + errno)
}
return copied
}
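
As a standalone illustration of the wrapOffset() encoding above (assumed values, not part of the diff): the 64-bit offset is written little-endian into an 8-byte buffer whose two 32-bit words are what the syscall binding receives.

// Illustration only: split a 64-bit offset into two little-endian 32-bit words,
// as wrapOffset() does for the copy_file_range offset arguments.
const offset = 2n ** 32n + 5n            // 4294967301
const words = new Uint32Array(2)
new DataView(words.buffer).setBigUint64(0, offset, true)
console.log(words[0], words[1])          // 5 1 (low word, high word) on x86_64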
export default class LocalHandler extends RemoteHandlerAbstract {
constructor(remote: any, options: Object = {}) {
super(remote, options)
this._canFallocate = true
}
get type() {
return 'file'
}
@@ -18,7 +47,7 @@ export default class LocalHandler extends RemoteHandlerAbstract {
}
async _closeFile(fd) {
return fs.close(fd)
return fs.close(fd.fd)
}
async _createReadStream(file, options) {
@@ -81,6 +110,26 @@ export default class LocalHandler extends RemoteHandlerAbstract {
return fs.open(this._getFilePath(path), flags)
}
/**
* Slightly different from the Linux system call:
* - offsets are mandatory (because some remote handlers don't have a current file pointer)
* - flags is fixed to 0
* - does not return until the copy is finished.
*
* @param fdIn file descriptor open for reading
* @param offsetIn start offset in the source file
* @param fdOut file descriptor open for writing (not appending!)
* @param offsetOut offset in the target file
* @param dataLen number of bytes to copy
* @returns {Promise<void>}
*/
async copyFileRange(fdIn, offsetIn, fdOut, offsetOut, dataLen) {
let copied = 0
do {
copied += await copyFileRangeSyscall(fdIn.fd, offsetIn + copied, fdOut.fd, offsetOut + copied, dataLen - copied)
} while (dataLen - copied > 0)
}
async _read(file, buffer, position) {
const needsClose = typeof file === 'string'
file = needsClose ? await fs.open(this._getFilePath(file), 'r') : file.fd

View File

@@ -138,14 +138,21 @@ export default class S3Handler extends RemoteHandlerAbstract {
file = file.fd
}
const uploadParams = this._createParams(file)
const fileSize = +(await this._s3.headObject(uploadParams).promise())
.ContentLength
let fileSize
try {
fileSize = +(await this._s3.headObject(uploadParams).promise()).ContentLength
} catch (e) {
if (e.code === 'NotFound') {
fileSize = 0
} else {
throw e
}
}
if (fileSize < MIN_PART_SIZE) {
const resultBuffer = Buffer.alloc(
Math.max(fileSize, position + buffer.length)
)
const fileContent = (await this._s3.getObject(uploadParams).promise())
.Body
const fileContent = fileSize ? (await this._s3.getObject(uploadParams).promise()).Body : Buffer.alloc(0)
fileContent.copy(resultBuffer)
buffer.copy(resultBuffer, position)
await this._s3

View File

@@ -2,6 +2,7 @@ import { parse } from 'xo-remote-parser'
import MountHandler from './_mount'
import normalizePath from './_normalizePath'
import { fromEvent } from "promise-toolbox"
export default class SmbMountHandler extends MountHandler {
constructor(remote, opts) {
@@ -22,4 +23,21 @@ export default class SmbMountHandler extends MountHandler {
get type() {
return 'smb'
}
// nraynaud: in some circumstances, renaming the file triggers a bug where we can't re-open it afterwards in SMB2
// Linux SMB client: Linux xoa 4.19.0-12-amd64 #1 SMP Debian 4.19.152-1 (2020-10-18) x86_64 GNU/Linux
// server: Windows 10 Family Edition 1909 (v18363.1139)
async _outputStream(input, path, { checksum }) {
const output = await this.createOutputStream(path, { checksum })
try {
input.pipe(output)
await fromEvent(output, 'finish')
await output.checksumWritten
// $FlowFixMe
await input.task
} catch (error) {
await this.unlink(path, { checksum })
throw error
}
}
}

View File

@@ -7,6 +7,8 @@
> Users must be able to say: “Nice enhancement, I'm eager to test it”
- [backup] improve merge speed after backup when using SMB3.1.1 or NFS4.2 (PR [#5331](https://github.com/vatesfr/xen-orchestra/pull/5331))
### Bug fixes
> Users must be able to say: “I had this issue, happy to know it's fixed”
@@ -27,3 +29,8 @@
> - major: if the change breaks compatibility
>
> In case of conflict, the highest (lowest in previous list) `$version` wins.
- @xen-orchestra/fs minor
- vhd-lib minor
- xo-server minor
- xo-web minor

View File

@@ -40,7 +40,6 @@
"jest": {
"collectCoverage": true,
"moduleNameMapper": {
"^.": "./src",
"^(@vates/[^/]+)": "$1/src",
"^(@xen-orchestra/[^/]+)": "$1/src",
"^(value-matcher)": "$1/src",

View File

@@ -28,12 +28,12 @@
"node": ">=8.10"
},
"dependencies": {
"@xen-orchestra/fs": "^0.11.1",
"@xen-orchestra/fs": "^0.12.0-0",
"cli-progress": "^3.1.0",
"exec-promise": "^0.7.0",
"getopts": "^2.2.3",
"struct-fu": "^1.2.0",
"vhd-lib": "^0.8.0"
"vhd-lib": "^0.9.0-0"
},
"devDependencies": {
"@babel/cli": "^7.0.0",

View File

@@ -11,7 +11,7 @@ import { pFromCallback } from 'promise-toolbox'
import { pipeline } from 'readable-stream'
import { randomBytes } from 'crypto'
import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './'
import Vhd, { chainVhd, createSyntheticStream, mergeVhd as vhdMerge } from './src/index'
import { SECTOR_SIZE } from './src/_constants'

View File

@@ -1,7 +1,7 @@
{
"private": false,
"name": "vhd-lib",
"version": "0.8.0",
"version": "0.9.0-0",
"license": "AGPL-3.0-or-later",
"description": "Primitives for VHD file handling",
"keywords": [],
@@ -36,7 +36,7 @@
"@babel/core": "^7.0.0",
"@babel/preset-env": "^7.0.0",
"@babel/preset-flow": "^7.0.0",
"@xen-orchestra/fs": "^0.11.1",
"@xen-orchestra/fs": "^0.12.0-0",
"babel-plugin-lodash": "^3.3.2",
"cross-env": "^7.0.2",
"execa": "^4.0.2",

View File

@@ -17,6 +17,8 @@ export default concurrency(2)(async function merge(
childPath,
{ onProgress = noop } = {}
) {
// merges blocks
let mergedDataSize = 0
const parentFd = await parentHandler.openFile(parentPath, 'r+')
try {
const parentVhd = new Vhd(parentHandler, parentFd)
@@ -68,14 +70,12 @@ export default concurrency(2)(async function merge(
onProgress({ total: nBlocks, done: 0 })
// merges blocks
let mergedDataSize = 0
for (let i = 0, block = firstBlock; i < nBlocks; ++i, ++block) {
while (!childVhd.containsBlock(block)) {
++block
}
mergedDataSize += await parentVhd.coalesceBlock(childVhd, block)
mergedDataSize += await parentVhd.coalesceBlock(childVhd, block, childFd, parentFd)
onProgress({
total: nBlocks,
done: i + 1,
@@ -94,12 +94,11 @@ export default concurrency(2)(async function merge(
// necessary to update values and to recreate the footer after block
// creation
await parentVhd.writeFooter()
return mergedDataSize
} finally {
await childHandler.closeFile(childFd)
}
} finally {
await parentHandler.closeFile(parentFd)
}
return mergedDataSize
})

View File

@@ -199,30 +199,35 @@ export default class Vhd {
}
// return the first sector (bitmap) of a block
_getBatEntry(block) {
const i = block * 4
_getBatEntry(blockId) {
const i = blockId * 4
const { blockTable } = this
return i < blockTable.length ? blockTable.readUInt32BE(i) : BLOCK_UNUSED
}
_readBlock(blockId, onlyBitmap = false) {
// returns the actual byte offset in the file, or null
_getBlockOffsetBytes(blockId) {
const blockAddr = this._getBatEntry(blockId)
if (blockAddr === BLOCK_UNUSED) {
return blockAddr === BLOCK_UNUSED ? null : sectorsToBytes(blockAddr)
}
_readBlock(blockId, onlyBitmap = false) {
const blockAddr = this._getBlockOffsetBytes(blockId)
if (blockAddr === null) {
throw new Error(`no such block ${blockId}`)
}
return this._read(
sectorsToBytes(blockAddr),
return this._read(blockAddr,
onlyBitmap ? this.bitmapSize : this.fullBlockSize
).then(buf =>
onlyBitmap
? { id: blockId, bitmap: buf }
: {
id: blockId,
bitmap: buf.slice(0, this.bitmapSize),
data: buf.slice(this.bitmapSize),
buffer: buf,
}
id: blockId,
bitmap: buf.slice(0, this.bitmapSize),
data: buf.slice(this.bitmapSize),
buffer: buf,
}
)
}
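
A small worked example for _getBlockOffsetBytes() (assumed values, not part of the diff): BAT entries are sector addresses, so the byte offset is the entry multiplied by the 512-byte VHD sector size.

// Assumed values for illustration: converting a BAT entry to a byte offset.
const SECTOR_SIZE = 512                   // VHD sector size
const sectorsToBytes = sectors => sectors * SECTOR_SIZE
const batEntry = 1200                     // sector address read from the BAT
console.log(sectorsToBytes(batEntry))     // 614400: where the block's bitmap starts in the file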
@@ -307,15 +312,18 @@ export default class Vhd {
return this._write(blockTable.slice(i, i + 4), this.header.tableOffset + i)
}
// Allocate a new uninitialized block in the BAT
async _createBlock(blockId) {
assert.strictEqual(this._getBatEntry(blockId), BLOCK_UNUSED)
// Make a new empty block at the end of the VHD file.
// Update the block allocation table both in memory and in the file.
async _createBlock(blockId, fullBlock = Buffer.alloc(this.fullBlockSize)) {
const blockAddr = Math.ceil(this._getEndOfData() / SECTOR_SIZE)
debug(`create block ${blockId} at ${blockAddr}`)
await this._setBatEntry(blockId, blockAddr)
await Promise.all([
// Write an empty block and addr in vhd file.
this._write(fullBlock, sectorsToBytes(blockAddr)),
this._setBatEntry(blockId, blockAddr),
])
return blockAddr
}
@@ -338,12 +346,13 @@ export default class Vhd {
await this._write(bitmap, sectorsToBytes(blockAddr))
}
async _writeEntireBlock(block) {
let blockAddr = this._getBatEntry(block.id)
async _getAddressOrAllocate(blockId) {
const blockAddr = this._getBlockOffsetBytes(blockId)
return blockAddr === null ? await this._createBlock(blockId) : blockAddr
}
if (blockAddr === BLOCK_UNUSED) {
blockAddr = await this._createBlock(block.id)
}
async _writeEntireBlock(block) {
const blockAddr = await this._getAddressOrAllocate(block.id)
await this._write(block.buffer, sectorsToBytes(blockAddr))
}
@@ -377,15 +386,18 @@ export default class Vhd {
)
}
async coalesceBlock(child, blockId) {
const block = await child._readBlock(blockId)
const { bitmap, data } = block
async coalesceBlock(child, blockId, childFd, parentFd) {
const childBlockAddress = child._getBlockOffsetBytes(blockId)
const bitmap = (await child._readBlock(blockId, true)).bitmap
debug(`coalesceBlock block=${blockId}`)
// For each sector of block data...
const { sectorsPerBlock } = child
// lazily loaded
let parentBitmap = null
// lazily loaded
let childBlock = null
for (let i = 0; i < sectorsPerBlock; i++) {
// If no changes on one sector, skip.
if (!mapTestBit(bitmap, i)) {
@@ -403,19 +415,22 @@ export default class Vhd {
const isFullBlock = i === 0 && endSector === sectorsPerBlock
if (isFullBlock) {
await this._writeEntireBlock(block)
await this._handler.copyFileRange(childFd, childBlockAddress, parentFd, await this._getAddressOrAllocate(blockId), this.fullBlockSize)
} else {
if (parentBitmap === null) {
parentBitmap = (await this._readBlock(blockId, true)).bitmap
}
await this._writeBlockSectors(block, i, endSector, parentBitmap)
if (childBlock === null) {
childBlock = await child._readBlock(blockId)
}
await this._writeBlockSectors(childBlock, i, endSector, parentBitmap)
}
i = endSector
}
// Return the merged data size
return data.length
return this.fullBlockSize - this.bitmapSize
}
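
To summarize the new fast path (a simplified sketch reusing names from the diff, not the actual implementation): when every sector of the child block is dirty, the whole block is copied file-to-file via copyFileRange() instead of being read into memory and written back.

// Simplified sketch of the full-block path in coalesceBlock() (names from the diff).
async function coalesceFullBlock(parentVhd, child, blockId, childFd, parentFd) {
  const srcOffset = child._getBlockOffsetBytes(blockId)             // child block: bitmap + data
  const dstOffset = await parentVhd._getAddressOrAllocate(blockId)  // existing or newly allocated parent block
  // kernel-side (or server-side) copy: the data never goes through the Node process
  await parentVhd._handler.copyFileRange(childFd, srcOffset, parentFd, dstOffset, parentVhd.fullBlockSize)
}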
// Write a context footer. (At the end and beginning of a vhd file.)
@@ -481,7 +496,7 @@ export default class Vhd {
)
const endInBuffer = Math.min(
((currentBlock + 1) * this.sectorsPerBlock - offsetSectors) *
SECTOR_SIZE,
SECTOR_SIZE,
buffer.length
)
let inputBuffer

View File

@@ -8,6 +8,6 @@
"promise-toolbox": "^0.13.0",
"readable-stream": "^3.1.1",
"throttle": "^1.0.3",
"vhd-lib": "^0.8.0"
"vhd-lib": "^0.9.0-0"
}
}

View File

@@ -1,7 +1,7 @@
{
"private": true,
"name": "xo-server",
"version": "5.70.0",
"version": "5.71.0-0",
"license": "AGPL-3.0-or-later",
"description": "Server part of Xen-Orchestra",
"keywords": [
@@ -38,7 +38,7 @@
"@xen-orchestra/cron": "^1.0.6",
"@xen-orchestra/defined": "^0.0.0",
"@xen-orchestra/emit-async": "^0.0.0",
"@xen-orchestra/fs": "^0.11.1",
"@xen-orchestra/fs": "^0.12.0-0",
"@xen-orchestra/log": "^0.2.0",
"@xen-orchestra/mixin": "^0.0.0",
"@xen-orchestra/self-signed": "^0.1.0",
@@ -130,7 +130,7 @@
"unzipper": "^0.10.5",
"uuid": "^3.0.1",
"value-matcher": "^0.2.0",
"vhd-lib": "^0.8.0",
"vhd-lib": "^0.9.0-0",
"ws": "^7.1.2",
"xdg-basedir": "^4.0.0",
"xen-api": "^0.29.0",

View File

@@ -30,7 +30,7 @@
"lodash": "^4.17.15",
"pako": "^1.0.11",
"promise-toolbox": "^0.15.0",
"vhd-lib": "^0.8.0",
"vhd-lib": "^0.9.0-0",
"xml2js": "^0.4.23"
},
"devDependencies": {

View File

@@ -3974,7 +3974,7 @@ bind-property-descriptor@^1.0.0:
dependencies:
lodash "^4.17.4"
bindings@^1.5.0:
bindings@^1.3.0, bindings@^1.5.0:
version "1.5.0"
resolved "https://registry.yarnpkg.com/bindings/-/bindings-1.5.0.tgz#10353c9e945334bc0511a6d90b38fbc7c9c504df"
integrity sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==
@@ -12784,7 +12784,7 @@ node-gyp-build@~4.1.0:
resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.1.1.tgz#d7270b5d86717068d114cc57fff352f96d745feb"
integrity sha512-dSq1xmcPDKPZ2EED2S6zw/b9NKsqzXRE6dVr8TVQnI3FJOTteUMuqF3Qqs6LZg+mLGYJWqQzMbIjMtJqTv87nQ==
node-gyp@^3.8.0:
node-gyp@^3.7.0, node-gyp@^3.8.0:
version "3.8.0"
resolved "https://registry.yarnpkg.com/node-gyp/-/node-gyp-3.8.0.tgz#540304261c330e80d0d5edce253a68cb3964218c"
integrity sha512-3g8lYefrRRzvGeSowdJKAKyks8oUpLEd/DyPV4eMhVlhJ0aNaZqIrNUIPuEWWTAoPqyFkfGrM67MC69baqn6vA==
@@ -17313,6 +17313,14 @@ syntax-error@^1.1.1:
dependencies:
acorn-node "^1.2.0"
syscall@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/syscall/-/syscall-0.2.0.tgz#9308898495dfb5c062ea7a60c46f81f29f532ac4"
integrity sha512-MLlgaLAMbOGKUVlqsLVYnJ4dBZmeE1nza4BVgVgGUr2dPV17tgR79JUPIUybX/EqGm1jywsXSXUPXpNbIXDVCw==
dependencies:
bindings "^1.3.0"
node-gyp "^3.7.0"
table@^5.2.3:
version "5.4.6"
resolved "https://registry.yarnpkg.com/table/-/table-5.4.6.tgz#1292d19500ce3f86053b05f0e8e7e4a3bb21079e"