feat(xosan): beta 2+ (#601)

This commit is contained in:
Nicolas Raynaud 2017-09-29 05:46:34 -07:00 committed by Julien Fontanet
parent c996b61eae
commit 6b17e80e28


@@ -9,6 +9,7 @@ import {
isArray,
remove,
filter,
find,
range
} from 'lodash'
import {
@@ -19,18 +20,30 @@ import {
const debug = createLogger('xo:xosan')
const SSH_KEY_FILE = 'id_rsa_xosan'
-const NETWORK_PREFIX = '172.31.100.'
const DEFAULT_NETWORK_PREFIX = '172.31.100.'
const VM_FIRST_NUMBER = 101
const HOST_FIRST_NUMBER = 1
const GIGABYTE = 1024 * 1024 * 1024
const XOSAN_VM_SYSTEM_DISK_SIZE = 10 * GIGABYTE
const XOSAN_DATA_DISK_USEAGE_RATIO = 0.99
const XOSAN_MAX_DISK_SIZE = 2093050 * 1024 * 1024 // a bit under 2To
-const CURRENTLY_CREATING_SRS = {}
const CURRENT_POOL_OPERATIONS = {}
function getXosanConfig (xosansr, xapi = this.getXapi(xosansr)) {
const data = xapi.xo.getData(xosansr, 'xosan_config')
if (data && data.networkPrefix === undefined) {
// some xosan might have been created before this field was added
data.networkPrefix = DEFAULT_NETWORK_PREFIX
// fire and forget
xapi.xo.setData(xosansr, 'xosan_config', data)
}
return data
}
function _getIPToVMDict (xapi, sr) {
const dict = {}
-const data = xapi.xo.getData(sr, 'xosan_config')
const data = getXosanConfig(sr, xapi)
if (data && data.nodes) {
data.nodes.forEach(conf => {
try {
@@ -45,11 +58,16 @@ function _getIPToVMDict (xapi, sr) {
function _getGlusterEndpoint (sr) {
const xapi = this.getXapi(sr)
-const data = xapi.xo.getData(sr, 'xosan_config')
const data = getXosanConfig(sr, xapi)
if (!data || !data.nodes) {
return null
}
return {
xapi,
data: data,
hosts: map(data.nodes, node => xapi.getObject(node.host)),
addresses: map(data.nodes, node => node.vm.ip)
}
}
async function rateLimitedRetry (action, shouldRetry, retryCount = 20) {
@@ -64,7 +82,7 @@ async function rateLimitedRetry (action, shouldRetry, retryCount = 20) {
return result
}
export async function getVolumeInfo ({sr, infoType}) {
const glusterEndpoint = this::_getGlusterEndpoint(sr)
function parseHeal (parsed) {
@@ -98,23 +116,36 @@ export async function getVolumeInfo ({ sr, infoType }) {
return {commandStatus: true, result: volume}
}
function sshInfoType (command, handler) {
return async () => {
const cmdShouldRetry = result => !result['commandStatus'] && result.parsed && result.parsed['cliOutput']['opErrno'] === '30802'
const runCmd = async () => glusterCmd(glusterEndpoint, 'volume ' + command, true)
let commandResult = await rateLimitedRetry(runCmd, cmdShouldRetry)
return commandResult['commandStatus'] ? handler(commandResult.parsed['cliOutput']) : commandResult
}
}
function checkHosts () {
const xapi = this.getXapi(sr)
const data = getXosanConfig(sr, xapi)
const network = xapi.getObject(data.network)
const badPifs = filter(network.$PIFs, pif => pif.ip_configuration_mode !== 'Static')
return badPifs.map(pif => ({pif, host: pif.$host.$id}))
}
const infoTypes = {
-heal: {command: 'heal xosan info', handler: parseHeal},
-status: {command: 'status xosan', handler: parseStatus},
-statusDetail: {command: 'status xosan detail', handler: parseStatus},
-statusMem: {command: 'status xosan mem', handler: parseStatus},
-info: {command: 'info xosan', handler: parseInfo}
heal: sshInfoType('heal xosan info', parseHeal),
status: sshInfoType('status xosan', parseStatus),
statusDetail: sshInfoType('status xosan detail', parseStatus),
statusMem: sshInfoType('status xosan mem', parseStatus),
info: sshInfoType('info xosan', parseInfo),
hosts: this::checkHosts
}
const foundType = infoTypes[infoType]
if (!foundType) {
throw new Error('getVolumeInfo(): "' + infoType + '" is an invalid type')
}
return foundType()
-const cmdShouldRetry =
-result => !result['commandStatus'] && result.parsed && result.parsed['cliOutput']['opErrno'] === '30802'
-const runCmd = async () => glusterCmd(glusterEndpoint, 'volume ' + foundType.command, true)
-let commandResult = await rateLimitedRetry(runCmd, cmdShouldRetry)
-return commandResult['commandStatus'] ? foundType.handler(commandResult.parsed['cliOutput']) : commandResult
}
getVolumeInfo.description = 'info on gluster volume'
@@ -131,12 +162,51 @@ getVolumeInfo.params = {
getVolumeInfo.resolve = {
sr: ['sr', 'SR', 'administrate']
}
function reconfigurePifIP (xapi, pif, newIP) {
xapi.call('PIF.reconfigure_ip', pif.$ref, 'Static', newIP, '255.255.255.0', '', '')
}
// this function should probably become fixSomething(thingToFix, params)
export async function fixHostNotInNetwork ({xosanSr, host}) {
const xapi = this.getXapi(xosanSr)
const data = getXosanConfig(xosanSr, xapi)
const network = xapi.getObject(data.network)
const usedAddresses = network.$PIFs.filter(pif => pif.ip_configuration_mode === 'Static').map(pif => pif.IP)
const pif = network.$PIFs.find(pif => pif.ip_configuration_mode !== 'Static' && pif.$host.$id === host)
if (pif) {
const newIP = _findIPAddressOutsideList(usedAddresses, HOST_FIRST_NUMBER)
reconfigurePifIP(xapi, pif, newIP)
await xapi.call('PIF.plug', pif.$ref)
const PBD = find(xosanSr.$PBDs, pbd => pbd.$host.$id === host)
if (PBD) {
await xapi.call('PBD.plug', PBD.$ref)
}
debug('host connected !')
}
}
fixHostNotInNetwork.description = 'put host in xosan network'
fixHostNotInNetwork.permission = 'admin'
fixHostNotInNetwork.params = {
xosanSr: {
type: 'string'
},
host: {
type: 'string'
}
}
fixHostNotInNetwork.resolve = {
sr: ['sr', 'SR', 'administrate']
}
function floor2048 (value) {
return 2048 * Math.floor(value / 2048)
}
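// Quick arithmetic check of the helper above (illustrative values only): floor2048(10000)
// returns 8192 and floor2048(2048) returns 2048, i.e. sizes are rounded down to a multiple of 2048.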
async function copyVm (xapi, originalVm, sr) {
return {sr, vm: await xapi.copyVm(originalVm, sr)}
}
async function callPlugin (xapi, host, command, params) {
@@ -210,8 +280,8 @@ async function glusterCmd (glusterEndpoint, cmd, ignoreError = false) {
return result
}
-const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure, xapi, pif, vlan) {
const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure, xapi, pif, vlan, networkPrefix) {
-let hostIpLastNumber = 1
let hostIpLastNumber = HOST_FIRST_NUMBER
const xosanNetwork = await xapi.createNetwork({
name: 'XOSAN network',
description: 'XOSAN network',
@@ -220,8 +290,7 @@ const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure,
vlan: +vlan
})
$onFailure(() => xapi.deleteNetwork(xosanNetwork))
-await Promise.all(xosanNetwork.$PIFs.map(pif => xapi.call('PIF.reconfigure_ip', pif.$ref, 'Static',
-NETWORK_PREFIX + (hostIpLastNumber++), '255.255.255.0', NETWORK_PREFIX + '1', '')))
await Promise.all(xosanNetwork.$PIFs.map(pif => reconfigurePifIP(xapi, pif, networkPrefix + (hostIpLastNumber++))))
return xosanNetwork
})
@@ -253,6 +322,7 @@ const _probePoolAndWaitForPresence = defer.onFailure(async function ($onFailure,
await glusterCmd(glusterEndpoint, 'peer probe ' + address)
$onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + address, true))
})
function shouldRetry (peers) {
for (let peer of peers) {
if (peer.state === '4') {
@@ -309,8 +379,15 @@ async function configureGluster (redundancy, ipAndHosts, glusterEndpoint, gluste
await glusterCmd(glusterEndpoint, 'volume start xosan')
}
-export const createSR = defer.onFailure(async function ($onFailure, { template, pif, vlan, srs, glusterType,
-redundancy, brickSize, memorySize }) {
export const createSR = defer.onFailure(async function ($onFailure, {
template, pif, vlan, srs, glusterType,
redundancy, brickSize, memorySize = 2 * GIGABYTE, ipRange = DEFAULT_NETWORK_PREFIX + '.0'
}) {
const OPERATION_OBJECT = {
operation: 'createSr',
states: ['configuringNetwork', 'importingVm', 'copyingVms',
'configuringVms', 'configuringGluster', 'creatingSr', 'scanningSr']
}
if (!this.requestResource) {
throw new Error('requestResource is not a function')
}
@@ -318,16 +395,18 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
if (srs.length < 1) {
return // TODO: throw an error
}
// '172.31.100.0' -> '172.31.100.'
const networkPrefix = ipRange.split('.').slice(0, 3).join('.') + '.'
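// Illustrative example of the derivation above (addresses are made up): an ipRange of
// '192.168.42.0' yields the prefix '192.168.42.', to which the per-host and per-VM last bytes are appended.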
let vmIpLastNumber = VM_FIRST_NUMBER
const xapi = this.getXapi(srs[0])
-if (CURRENTLY_CREATING_SRS[xapi.pool.$id]) {
const poolId = xapi.pool.$id
if (CURRENT_POOL_OPERATIONS[poolId]) {
throw new Error('createSR is already running for this pool')
}
-CURRENTLY_CREATING_SRS[xapi.pool.$id] = true
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
try {
-const xosanNetwork = await createNetworkAndInsertHosts(xapi, pif, vlan)
const xosanNetwork = await createNetworkAndInsertHosts(xapi, pif, vlan, networkPrefix)
$onFailure(() => xapi.deleteNetwork(xosanNetwork))
const sshKey = await getOrCreateSshKey(xapi)
const srsObjects = map(srs, srId => xapi.getObject(srId))
@@ -338,10 +417,12 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
})))
const firstSr = srsObjects[0]
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
const firstVM = await this::_importGlusterVM(xapi, template, firstSr)
$onFailure(() => xapi.deleteVm(firstVM, true))
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
const copiedVms = await asyncMap(srsObjects.slice(1), sr =>
copyVm(xapi, firstVM, sr)::tap(({vm}) =>
$onFailure(() => xapi.deleteVm(vm))
)
)
@@ -350,29 +431,35 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
sr: firstSr
}].concat(copiedVms)
let arbiter = null
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
if (srs.length === 2) {
const sr = firstSr
-const arbiterIP = NETWORK_PREFIX + (vmIpLastNumber++)
const arbiterIP = networkPrefix + (vmIpLastNumber++)
const arbiterVm = await xapi.copyVm(firstVM, sr)
$onFailure(() => xapi.deleteVm(arbiterVm, true))
arbiter = await _prepareGlusterVm(xapi, sr, arbiterVm, xosanNetwork, arbiterIP, {
labelSuffix: '_arbiter',
increaseDataDisk: false,
memorySize
})
arbiter.arbiter = true
}
const ipAndHosts = await asyncMap(vmsAndSrs, vmAndSr => _prepareGlusterVm(xapi, vmAndSr.sr, vmAndSr.vm, xosanNetwork,
-NETWORK_PREFIX + (vmIpLastNumber++), {maxDiskSize: brickSize, memorySize}))
networkPrefix + (vmIpLastNumber++), {maxDiskSize: brickSize, memorySize}))
const glusterEndpoint = {xapi, hosts: map(ipAndHosts, ih => ih.host), addresses: map(ipAndHosts, ih => ih.address)}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 4}
await configureGluster(redundancy, ipAndHosts, glusterEndpoint, glusterType, arbiter)
debug('xosan gluster volume started')
// We use 10 IPs of the gluster VM range as backup, in the hope that even if the first VM gets destroyed we find at least
// one VM from which to mount the volfile.
// It is not possible to edit the device_config after the SR is created and this data is only used at mount time when rebooting
// the hosts.
-const backupservers = map(range(VM_FIRST_NUMBER, VM_FIRST_NUMBER + 10), ipLastByte => NETWORK_PREFIX + ipLastByte).join(':')
const backupservers = map(range(VM_FIRST_NUMBER, VM_FIRST_NUMBER + 10), ipLastByte => networkPrefix + ipLastByte).join(':')
const config = {server: ipAndHosts[0].address + ':/xosan', backupservers}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 5}
const xosanSrRef = await xapi.call('SR.create', firstSr.$PBDs[0].$host.$ref, config, 0, 'XOSAN', 'XOSAN',
'xosan', '', true, {})
debug('sr created')
// we just forget because the cleanup actions are stacked in the $onFailure system
$onFailure(() => xapi.forgetSr(xosanSrRef))
if (arbiter) {
@@ -391,12 +478,14 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
template: template,
network: xosanNetwork.$id,
type: glusterType,
networkPrefix,
redundancy
})
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 6}
debug('scanning new SR')
await xapi.call('SR.scan', xosanSrRef)
} finally {
-delete CURRENTLY_CREATING_SRS[xapi.pool.$id]
delete CURRENT_POOL_OPERATIONS[poolId]
}
})
@@ -420,6 +509,12 @@ createSR.params = {
},
redundancy: {
type: 'number'
},
memorySize: {
type: 'number', optional: true
},
ipRange: {
type: 'string', optional: true
}
}
@@ -450,85 +545,118 @@ async function mountNewDisk (localEndpoint, hostname, newDeviceFiledeviceFile) {
}
async function replaceBrickOnSameVM (xosansr, previousBrick, newLvmSr, brickSize) {
const OPERATION_OBJECT = {
operation: 'replaceBrick',
states: ['creatingNewDisk', 'mountingDisk', 'swappingBrick', 'disconnectingOldDisk', 'scanningSr']
}
const xapi = this.getXapi(xosansr)
const poolId = xapi.pool.$id
try {
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
// TODO: a bit of user input validation on 'previousBrick', it's going to ssh
const previousIp = previousBrick.split(':')[0]
brickSize = brickSize === undefined ? Infinity : brickSize
-const data = xapi.xo.getData(xosansr, 'xosan_config')
const data = this::getXosanConfig(xosansr)
const nodes = data.nodes
const nodeIndex = nodes.findIndex(node => node.vm.ip === previousIp)
const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
const previousVM = _getIPToVMDict(xapi, xosansr)[previousBrick].vm
const newDeviceFile = await createNewDisk(xapi, newLvmSr, previousVM, brickSize)
const localEndpoint = {
xapi,
hosts: map(nodes, node => xapi.getObject(node.host)),
addresses: [previousIp]
}
const previousBrickRoot = previousBrick.split(':')[1].split('/').slice(0, 3).join('/')
const previousBrickDevice = (await remoteSsh(localEndpoint, `grep " ${previousBrickRoot} " /proc/mounts | cut -d ' ' -f 1 | sed 's_/dev/__'`)).stdout.trim()
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
const brickName = await mountNewDisk(localEndpoint, previousIp, newDeviceFile)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
await glusterCmd(glusterEndpoint, `volume replace-brick xosan ${previousBrick} ${brickName} commit force`)
nodes[nodeIndex].brickName = brickName
nodes[nodeIndex].underlyingSr = newLvmSr
await xapi.xo.setData(xosansr, 'xosan_config', data)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
await umountDisk(localEndpoint, previousBrickRoot)
const previousVBD = previousVM.$VBDs.find(vbd => vbd.device === previousBrickDevice)
await xapi.disconnectVbd(previousVBD)
await xapi.deleteVdi(previousVBD.VDI)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 4}
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
}
export async function replaceBrick ({xosansr, previousBrick, newLvmSr, brickSize, onSameVM = true}) {
const OPERATION_OBJECT = {
operation: 'replaceBrick',
states: ['insertingNewVm', 'swapingBrick', 'deletingVm', 'scanningSr']
}
if (onSameVM) {
return this::replaceBrickOnSameVM(xosansr, previousBrick, newLvmSr, brickSize)
}
const xapi = this.getXapi(xosansr)
const poolId = xapi.pool.$id
try {
// TODO: a bit of user input validation on 'previousBrick', it's going to ssh
const previousIp = previousBrick.split(':')[0]
brickSize = brickSize === undefined ? Infinity : brickSize
-const nodes = xapi.xo.getData(xosansr, 'xosan_config').nodes
-const newIpAddress = _findAFreeIPAddress(nodes)
const data = getXosanConfig(xosansr, xapi)
const nodes = data.nodes
const newIpAddress = _findAFreeIPAddress(nodes, data.networkPrefix)
const nodeIndex = nodes.findIndex(node => node.vm.ip === previousIp)
const stayingNodes = filter(nodes, (node, index) => index !== nodeIndex)
const glusterEndpoint = {
xapi,
hosts: map(stayingNodes, node => xapi.getObject(node.host)),
addresses: map(stayingNodes, node => node.vm.ip)
}
const previousVMEntry = _getIPToVMDict(xapi, xosansr)[previousBrick]
const arbiter = nodes[nodeIndex].arbiter
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
-let { data, newVM, addressAndHost } = await this::insertNewGlusterVm(xapi, xosansr, newLvmSr,
let {newVM, addressAndHost} = await this::insertNewGlusterVm(xapi, xosansr, newLvmSr,
{labelSuffix: arbiter ? '_arbiter' : '', glusterEndpoint, newIpAddress, increaseDataDisk: !arbiter, brickSize})
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
await glusterCmd(glusterEndpoint, `volume replace-brick xosan ${previousBrick} ${addressAndHost.brickName} commit force`)
await glusterCmd(glusterEndpoint, 'peer detach ' + previousIp)
data.nodes.splice(nodeIndex, 1, {
brickName: addressAndHost.brickName,
host: addressAndHost.host.$id,
arbiter: arbiter,
vm: {ip: addressAndHost.address, id: newVM.$id},
underlyingSr: newLvmSr
})
await xapi.xo.setData(xosansr, 'xosan_config', data)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
if (previousVMEntry) {
await xapi.deleteVm(previousVMEntry.vm, true)
}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
}
replaceBrick.description = 'replaceBrick brick in gluster volume'
replaceBrick.permission = 'admin'
replaceBrick.params = {
xosansr: {type: 'string'},
previousBrick: {type: 'string'},
newLvmSr: {type: 'string'},
brickSize: {type: 'number'}
}
replaceBrick.resolve = {
xosansr: ['sr', 'SR', 'administrate']
}
async function _prepareGlusterVm (xapi, lvmSr, newVM, xosanNetwork, ipAddress, {
labelSuffix = '', increaseDataDisk = true,
maxDiskSize = Infinity, memorySize = 2 * GIGABYTE
}) {
const host = lvmSr.$PBDs[0].$host
const xenstoreData = {
'vm-data/hostname': 'XOSAN' + lvmSr.name_label + labelSuffix,
@@ -580,26 +708,25 @@ async function _prepareGlusterVm (xapi, lvmSr, newVM, xosanNetwork, ipAddress, {
const smallDiskSize = 1073741824
const deviceFile = await createNewDisk(xapi, lvmSr, newVM, increaseDataDisk ? newSize : smallDiskSize)
const brickName = await mountNewDisk(localEndpoint, ip, deviceFile)
return {address: ip, host, vm, underlyingSr: lvmSr, brickName}
}
async function _importGlusterVM (xapi, template, lvmsrId) {
const templateStream = await this.requestResource('xosan', template.id, template.version)
const newVM = await xapi.importVm(templateStream, {srId: lvmsrId, type: 'xva'})
await xapi.editVm(newVM, {
autoPoweron: true
})
return newVM
}
-function _findAFreeIPAddress (nodes) {
function _findAFreeIPAddress (nodes, networkPrefix) {
-return _findIPAddressOutsideList(map(nodes, n => n.vm.ip))
return _findIPAddressOutsideList(map(nodes, n => n.vm.ip), networkPrefix)
}
-function _findIPAddressOutsideList (reservedList) {
-const vmIpLastNumber = 101
function _findIPAddressOutsideList (reservedList, networkPrefix, vmIpLastNumber = 101) {
for (let i = vmIpLastNumber; i < 255; i++) {
-const candidate = NETWORK_PREFIX + i
const candidate = networkPrefix + i
if (!reservedList.find(a => a === candidate)) {
return candidate
}
@@ -612,11 +739,13 @@ const _median = arr => {
return arr[Math.floor(arr.length / 2)]
}
const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xosansr, lvmsrId, {
labelSuffix = '',
glusterEndpoint = null, ipAddress = null, increaseDataDisk = true, brickSize = Infinity
}) {
-const data = xapi.xo.getData(xosansr, 'xosan_config')
const data = getXosanConfig(xosansr, xapi)
if (ipAddress === null) {
-ipAddress = _findAFreeIPAddress(data.nodes)
ipAddress = _findAFreeIPAddress(data.nodes, data.networkPrefix)
}
const vmsMemories = []
for (let node of data.nodes) {
@@ -631,33 +760,40 @@ const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xos
// can't really copy an existing VM, because existing gluster VM disks might be too large to be copied.
const newVM = await this::_importGlusterVM(xapi, data.template, lvmsrId)
$onFailure(() => xapi.deleteVm(newVM, true))
const addressAndHost = await _prepareGlusterVm(xapi, srObject, newVM, xosanNetwork, ipAddress, {
labelSuffix,
increaseDataDisk,
maxDiskSize: brickSize,
memorySize: vmsMemories.length ? _median(vmsMemories) : 2 * GIGABYTE
})
if (!glusterEndpoint) {
glusterEndpoint = this::_getGlusterEndpoint(xosansr)
}
await _probePoolAndWaitForPresence(glusterEndpoint, [addressAndHost.address])
return {data, newVM, addressAndHost, glusterEndpoint}
})
export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, lvmsrs, brickSize}) {
const OPERATION_OBJECT = {
operation: 'addBricks',
states: ['insertingNewVms', 'addingBricks', 'scanningSr']
}
const xapi = this.getXapi(xosansr)
-if (CURRENTLY_CREATING_SRS[xapi.pool.$id]) {
const poolId = xapi.pool.$id
if (CURRENT_POOL_OPERATIONS[poolId]) {
throw new Error('createSR is already running for this pool')
}
-CURRENTLY_CREATING_SRS[xapi.pool.$id] = true
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
try {
-const data = xapi.xo.getData(xosansr, 'xosan_config')
const data = getXosanConfig(xosansr, xapi)
const usedAddresses = map(data.nodes, n => n.vm.ip)
const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
const newAddresses = []
const newNodes = []
for (let newSr of lvmsrs) {
-const ipAddress = _findIPAddressOutsideList(usedAddresses.concat(newAddresses))
const ipAddress = _findIPAddressOutsideList(usedAddresses.concat(newAddresses), data.networkPrefix)
newAddresses.push(ipAddress)
const {newVM, addressAndHost} = await this::insertNewGlusterVm(xapi, xosansr, newSr, {ipAddress, brickSize})
$onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + ipAddress, true))
$onFailure(() => xapi.deleteVm(newVM, true))
const brickName = addressAndHost.brickName
@@ -673,24 +809,27 @@ export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, l
await glusterCmd(glusterEndpoint, 'peer detach ' + arbiterNode.vm.ip, true)
await xapi.deleteVm(arbiterNode.vm.id, true)
}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
await glusterCmd(glusterEndpoint, `volume add-brick xosan ${newNodes.map(n => n.brickName).join(' ')}`)
data.nodes = data.nodes.concat(newNodes)
await xapi.xo.setData(xosansr, 'xosan_config', data)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
-delete CURRENTLY_CREATING_SRS[xapi.pool.$id]
delete CURRENT_POOL_OPERATIONS[poolId]
}
})
addBricks.description = 'add brick to XOSAN SR'
addBricks.permission = 'admin'
addBricks.params = {
xosansr: {type: 'string'},
lvmsrs: {
type: 'array',
items: {
type: 'string'
}
},
brickSize: {type: 'number'}
}
@@ -699,14 +838,14 @@ addBricks.resolve = {
lvmsrs: ['sr', 'SR', 'administrate']
}
export const removeBricks = defer.onFailure(async function ($onFailure, {xosansr, bricks}) {
const xapi = this.getXapi(xosansr)
-if (CURRENTLY_CREATING_SRS[xapi.pool.$id]) {
if (CURRENT_POOL_OPERATIONS[xapi.pool.$id]) {
throw new Error('this there is already a XOSAN operation running on this pool')
}
-CURRENTLY_CREATING_SRS[xapi.pool.$id] = true
CURRENT_POOL_OPERATIONS[xapi.pool.$id] = true
try {
-const data = xapi.xo.getData(xosansr, 'xosan_config')
const data = getXosanConfig(xosansr, xapi)
// IPV6
const ips = map(bricks, b => b.split(':')[0])
const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
@@ -721,60 +860,61 @@ export const removeBricks = defer.onFailure(async function ($onFailure, { xosans
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
await asyncMap(brickVMs, vm => xapi.deleteVm(vm.vm, true))
} finally {
-delete CURRENTLY_CREATING_SRS[xapi.pool.$id]
delete CURRENT_POOL_OPERATIONS[xapi.pool.$id]
}
})
removeBricks.description = 'remove brick from XOSAN SR'
removeBricks.permission = 'admin'
removeBricks.params = {
xosansr: {type: 'string'},
bricks: {
type: 'array',
items: {type: 'string'}
}
}
-export function checkSrIsBusy ({ poolId }) {
-return !!CURRENTLY_CREATING_SRS[poolId]
-}
-checkSrIsBusy.description = 'checks if there is a xosan SR curently being created on the given pool id'
-checkSrIsBusy.permission = 'admin'
-checkSrIsBusy.params = { poolId: { type: 'string' } }
export function checkSrCurrentState ({poolId}) {
return CURRENT_POOL_OPERATIONS[poolId]
}
checkSrCurrentState.description = 'checks if there is an operation currently running on the SR'
checkSrCurrentState.permission = 'admin'
checkSrCurrentState.params = {poolId: {type: 'string'}}
const POSSIBLE_CONFIGURATIONS = {}
POSSIBLE_CONFIGURATIONS[2] = [{layout: 'replica_arbiter', redundancy: 3, capacity: 1}]
POSSIBLE_CONFIGURATIONS[3] = [
{layout: 'disperse', redundancy: 1, capacity: 2},
{layout: 'replica', redundancy: 3, capacity: 1}]
POSSIBLE_CONFIGURATIONS[4] = [{layout: 'replica', redundancy: 2, capacity: 2}]
POSSIBLE_CONFIGURATIONS[5] = [{layout: 'disperse', redundancy: 1, capacity: 4}]
POSSIBLE_CONFIGURATIONS[6] = [
{layout: 'disperse', redundancy: 2, capacity: 4},
{layout: 'replica', redundancy: 2, capacity: 3},
{layout: 'replica', redundancy: 3, capacity: 2}]
POSSIBLE_CONFIGURATIONS[7] = [{layout: 'disperse', redundancy: 3, capacity: 4}]
POSSIBLE_CONFIGURATIONS[8] = [{layout: 'replica', redundancy: 2, capacity: 4}]
POSSIBLE_CONFIGURATIONS[9] = [
{layout: 'disperse', redundancy: 1, capacity: 8},
{layout: 'replica', redundancy: 3, capacity: 3}]
POSSIBLE_CONFIGURATIONS[10] = [
{layout: 'disperse', redundancy: 2, capacity: 8},
{layout: 'replica', redundancy: 2, capacity: 5}]
POSSIBLE_CONFIGURATIONS[11] = [{layout: 'disperse', redundancy: 3, capacity: 8}]
POSSIBLE_CONFIGURATIONS[12] = [
{layout: 'disperse', redundancy: 4, capacity: 8},
{layout: 'replica', redundancy: 2, capacity: 6}]
POSSIBLE_CONFIGURATIONS[13] = [{layout: 'disperse', redundancy: 5, capacity: 8}]
POSSIBLE_CONFIGURATIONS[14] = [
{layout: 'disperse', redundancy: 6, capacity: 8},
{layout: 'replica', redundancy: 2, capacity: 7}]
POSSIBLE_CONFIGURATIONS[15] = [
{layout: 'disperse', redundancy: 7, capacity: 8},
{layout: 'replica', redundancy: 3, capacity: 5}]
POSSIBLE_CONFIGURATIONS[16] = [{layout: 'replica', redundancy: 2, capacity: 8}]
export async function computeXosanPossibleOptions ({lvmSrs, brickSize = Infinity}) {
const count = lvmSrs.length
const configurations = POSSIBLE_CONFIGURATIONS[count]
if (!configurations) {
@@ -786,7 +926,7 @@ export async function computeXosanPossibleOptions ({ lvmSrs, brickSize = Infinit
const srSizes = map(srs, sr => sr.physical_size - sr.physical_utilisation)
const minSize = Math.min.apply(null, srSizes.concat(brickSize))
const finalBrickSize = Math.floor((minSize - XOSAN_VM_SYSTEM_DISK_SIZE) * XOSAN_DATA_DISK_USEAGE_RATIO)
-return configurations.map(conf => ({ ...conf, availableSpace: finalBrickSize * conf.capacity }))
return configurations.map(conf => ({...conf, availableSpace: Math.max(0, finalBrickSize * conf.capacity)}))
}
}
@@ -804,7 +944,7 @@ computeXosanPossibleOptions.params = {
// ---------------------------------------------------------------------
export async function downloadAndInstallXosanPack ({id, version, pool}) {
if (!this.requestResource) {
throw new Error('requestResource is not a function')
}
@@ -821,9 +961,9 @@ export async function downloadAndInstallXosanPack ({ id, version, pool }) {
downloadAndInstallXosanPack.description = 'Register a resource via cloud plugin'
downloadAndInstallXosanPack.params = {
id: {type: 'string'},
version: {type: 'string'},
pool: {type: 'string'}
}
downloadAndInstallXosanPack.resolve = {