feat(xosan): beta 2+ (#601)

Nicolas Raynaud 2017-09-29 05:46:34 -07:00 committed by Julien Fontanet
parent c996b61eae
commit 6b17e80e28


@@ -9,6 +9,7 @@ import {
isArray,
remove,
filter,
find,
range
} from 'lodash'
import {
@@ -19,18 +20,30 @@ import {
const debug = createLogger('xo:xosan')
const SSH_KEY_FILE = 'id_rsa_xosan'
const DEFAULT_NETWORK_PREFIX = '172.31.100.'
const VM_FIRST_NUMBER = 101
const HOST_FIRST_NUMBER = 1
const GIGABYTE = 1024 * 1024 * 1024
const XOSAN_VM_SYSTEM_DISK_SIZE = 10 * GIGABYTE
const XOSAN_DATA_DISK_USEAGE_RATIO = 0.99
const XOSAN_MAX_DISK_SIZE = 2093050 * 1024 * 1024 // a bit under 2 TB
const CURRENT_POOL_OPERATIONS = {}
function getXosanConfig (xosansr, xapi = this.getXapi(xosansr)) {
const data = xapi.xo.getData(xosansr, 'xosan_config')
if (data && data.networkPrefix === undefined) {
// some xosan might have been created before this field was added
data.networkPrefix = DEFAULT_NETWORK_PREFIX
// fire and forget
xapi.xo.setData(xosansr, 'xosan_config', data)
}
return data
}
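For context, the `xosan_config` blob this helper reads and migrates has roughly the shape written by `createSR` further down. A sketch inferred from the fields this file reads and writes, with hypothetical example values — not an authoritative schema:

```js
const xosanConfig = {
  networkPrefix: '172.31.100.', // absent on SRs created before beta 2, hence the migration above
  network: '<network id>',
  template: {id: '<template id>', version: '<version>'},
  type: 'replica',
  redundancy: 2,
  nodes: [{
    host: '<host id>',
    vm: {id: '<vm id>', ip: '172.31.100.101'},
    underlyingSr: '<lvm sr id>',
    brickName: '172.31.100.101:/bricks/xosan/xosandir', // exact path layout is an assumption
    arbiter: false
  }]
}
```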
function _getIPToVMDict (xapi, sr) {
const dict = {}
const data = getXosanConfig(sr, xapi)
if (data && data.nodes) {
data.nodes.forEach(conf => {
try {
@@ -45,11 +58,16 @@ function _getIPToVMDict (xapi, sr) {
function _getGlusterEndpoint (sr) {
const xapi = this.getXapi(sr)
const data = getXosanConfig(sr, xapi)
if (!data || !data.nodes) {
return null
}
return {
xapi,
data: data,
hosts: map(data.nodes, node => xapi.getObject(node.host)),
addresses: map(data.nodes, node => node.vm.ip)
}
}
async function rateLimitedRetry (action, shouldRetry, retryCount = 20) {
@@ -64,7 +82,7 @@ async function rateLimitedRetry (action, shouldRetry, retryCount = 20) {
return result
}
export async function getVolumeInfo ({sr, infoType}) {
const glusterEndpoint = this::_getGlusterEndpoint(sr)
function parseHeal (parsed) {
@@ -98,23 +116,36 @@ export async function getVolumeInfo ({ sr, infoType }) {
return {commandStatus: true, result: volume}
}
function sshInfoType (command, handler) {
return async () => {
const cmdShouldRetry = result => !result['commandStatus'] && result.parsed && result.parsed['cliOutput']['opErrno'] === '30802'
const runCmd = async () => glusterCmd(glusterEndpoint, 'volume ' + command, true)
let commandResult = await rateLimitedRetry(runCmd, cmdShouldRetry)
return commandResult['commandStatus'] ? handler(commandResult.parsed['cliOutput']) : commandResult
}
}
function checkHosts () {
const xapi = this.getXapi(sr)
const data = getXosanConfig(sr, xapi)
const network = xapi.getObject(data.network)
const badPifs = filter(network.$PIFs, pif => pif.ip_configuration_mode !== 'Static')
return badPifs.map(pif => ({pif, host: pif.$host.$id}))
}
const infoTypes = {
heal: sshInfoType('heal xosan info', parseHeal),
status: sshInfoType('status xosan', parseStatus),
statusDetail: sshInfoType('status xosan detail', parseStatus),
statusMem: sshInfoType('status xosan mem', parseStatus),
info: sshInfoType('info xosan', parseInfo),
hosts: this::checkHosts
}
const foundType = infoTypes[infoType]
if (!foundType) {
throw new Error('getVolumeInfo(): "' + infoType + '" is an invalid type')
}
return foundType()
}
getVolumeInfo.description = 'info on gluster volume'
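Each ssh-backed info type now goes through `sshInfoType`, which retries the gluster CLI while it reports `opErrno` 30802 (another transaction in progress). A hypothetical client call, assuming the usual `xosan.*` API namespace:

```js
// Hypothetical invocation through the xo-server API (method namespace assumed).
const heal = await xo.call('xosan.getVolumeInfo', {sr: srId, infoType: 'heal'})
// 'hosts' is handled locally by checkHosts() and returns the PIFs of the XOSAN
// network that are not statically configured, along with their host ids.
const badPifs = await xo.call('xosan.getVolumeInfo', {sr: srId, infoType: 'hosts'})
```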
@@ -131,12 +162,51 @@ getVolumeInfo.params = {
getVolumeInfo.resolve = {
sr: ['sr', 'SR', 'administrate']
}
function reconfigurePifIP (xapi, pif, newIP) {
xapi.call('PIF.reconfigure_ip', pif.$ref, 'Static', newIP, '255.255.255.0', '', '')
}
// this function should probably become fixSomething(thingToFix, params)
export async function fixHostNotInNetwork ({xosanSr, host}) {
const xapi = this.getXapi(xosanSr)
const data = getXosanConfig(xosanSr, xapi)
const network = xapi.getObject(data.network)
const usedAddresses = network.$PIFs.filter(pif => pif.ip_configuration_mode === 'Static').map(pif => pif.IP)
const pif = network.$PIFs.find(pif => pif.ip_configuration_mode !== 'Static' && pif.$host.$id === host)
if (pif) {
const newIP = _findIPAddressOutsideList(usedAddresses, HOST_FIRST_NUMBER)
reconfigurePifIP(xapi, pif, newIP)
await xapi.call('PIF.plug', pif.$ref)
const PBD = find(xosanSr.$PBDs, pbd => pbd.$host.$id === host)
if (PBD) {
await xapi.call('PBD.plug', PBD.$ref)
}
debug('host connected!')
}
}
fixHostNotInNetwork.description = 'put host in xosan network'
fixHostNotInNetwork.permission = 'admin'
fixHostNotInNetwork.params = {
xosanSr: {
type: 'string'
},
host: {
type: 'string'
}
}
fixHostNotInNetwork.resolve = {
xosanSr: ['xosanSr', 'SR', 'administrate']
}
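The `hosts` info type above and this fix are meant to work together: detect hosts whose PIF on the XOSAN network lost its static configuration, then repair them one by one. A sketch, with the same assumed API namespace as above:

```js
// Hypothetical repair loop built from the two methods defined in this file.
const badPifs = await xo.call('xosan.getVolumeInfo', {sr: srId, infoType: 'hosts'})
for (const {host} of badPifs) {
  await xo.call('xosan.fixHostNotInNetwork', {xosanSr: srId, host})
}
```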
function floor2048 (value) {
return 2048 * Math.floor(value / 2048)
}
async function copyVm (xapi, originalVm, sr) {
return {sr, vm: await xapi.copyVm(originalVm, sr)}
}
async function callPlugin (xapi, host, command, params) {
@@ -210,8 +280,8 @@ async function glusterCmd (glusterEndpoint, cmd, ignoreError = false) {
return result
}
const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure, xapi, pif, vlan, networkPrefix) {
let hostIpLastNumber = HOST_FIRST_NUMBER
const xosanNetwork = await xapi.createNetwork({
name: 'XOSAN network',
description: 'XOSAN network',
@@ -220,8 +290,7 @@ const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure,
vlan: +vlan
})
$onFailure(() => xapi.deleteNetwork(xosanNetwork))
await Promise.all(xosanNetwork.$PIFs.map(pif => reconfigurePifIP(xapi, pif, networkPrefix + (hostIpLastNumber++))))
return xosanNetwork
})
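With the default range, the resulting address plan splits the /24 between hosts and gluster VMs; a small illustration of the constants above:

```js
// Address plan for ipRange '172.31.100.0' (the default):
//   hosts: 172.31.100.1, 172.31.100.2, ...     (HOST_FIRST_NUMBER = 1)
//   VMs:   172.31.100.101, 172.31.100.102, ... (VM_FIRST_NUMBER = 101)
const hostIp = n => '172.31.100.' + (HOST_FIRST_NUMBER + n) // n = 0, 1, ...
const vmIp = n => '172.31.100.' + (VM_FIRST_NUMBER + n)
```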
@@ -253,6 +322,7 @@ const _probePoolAndWaitForPresence = defer.onFailure(async function ($onFailure,
await glusterCmd(glusterEndpoint, 'peer probe ' + address)
$onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + address, true))
})
function shouldRetry (peers) {
for (let peer of peers) {
if (peer.state === '4') {
@@ -309,8 +379,15 @@ async function configureGluster (redundancy, ipAndHosts, glusterEndpoint, gluste
await glusterCmd(glusterEndpoint, 'volume start xosan')
}
export const createSR = defer.onFailure(async function ($onFailure, {
template, pif, vlan, srs, glusterType,
redundancy, brickSize, memorySize = 2 * GIGABYTE, ipRange = DEFAULT_NETWORK_PREFIX + '.0'
}) {
const OPERATION_OBJECT = {
operation: 'createSr',
states: ['configuringNetwork', 'importingVm', 'copyingVms',
'configuringVms', 'configuringGluster', 'creatingSr', 'scanningSr']
}
if (!this.requestResource) {
throw new Error('requestResource is not a function')
}
@@ -318,16 +395,18 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
if (srs.length < 1) {
return // TODO: throw an error
}
// '172.31.100.0' -> '172.31.100.'
const networkPrefix = ipRange.split('.').slice(0, 3).join('.') + '.'
let vmIpLastNumber = VM_FIRST_NUMBER
const xapi = this.getXapi(srs[0])
const poolId = xapi.pool.$id
if (CURRENT_POOL_OPERATIONS[poolId]) {
throw new Error('createSR is already running for this pool')
}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
try {
const xosanNetwork = await createNetworkAndInsertHosts(xapi, pif, vlan, networkPrefix)
$onFailure(() => xapi.deleteNetwork(xosanNetwork))
const sshKey = await getOrCreateSshKey(xapi)
const srsObjects = map(srs, srId => xapi.getObject(srId))
@@ -338,10 +417,12 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
})))
const firstSr = srsObjects[0]
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
const firstVM = await this::_importGlusterVM(xapi, template, firstSr)
$onFailure(() => xapi.deleteVm(firstVM, true))
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
const copiedVms = await asyncMap(srsObjects.slice(1), sr =>
copyVm(xapi, firstVM, sr)::tap(({vm}) =>
$onFailure(() => xapi.deleteVm(vm))
)
)
@@ -350,29 +431,35 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
sr: firstSr
}].concat(copiedVms)
let arbiter = null
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
if (srs.length === 2) {
const sr = firstSr
const arbiterIP = networkPrefix + (vmIpLastNumber++)
const arbiterVm = await xapi.copyVm(firstVM, sr)
$onFailure(() => xapi.deleteVm(arbiterVm, true))
arbiter = await _prepareGlusterVm(xapi, sr, arbiterVm, xosanNetwork, arbiterIP, {
labelSuffix: '_arbiter',
increaseDataDisk: false,
memorySize
})
arbiter.arbiter = true
}
const ipAndHosts = await asyncMap(vmsAndSrs, vmAndSr => _prepareGlusterVm(xapi, vmAndSr.sr, vmAndSr.vm, xosanNetwork,
networkPrefix + (vmIpLastNumber++), {maxDiskSize: brickSize, memorySize}))
const glusterEndpoint = {xapi, hosts: map(ipAndHosts, ih => ih.host), addresses: map(ipAndHosts, ih => ih.address)}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 4}
await configureGluster(redundancy, ipAndHosts, glusterEndpoint, glusterType, arbiter)
debug('xosan gluster volume started')
// We use 10 IPs of the gluster VM range as backup volfile servers, in the hope that even if the first VM
// gets destroyed, the client can still fetch the volfile from at least one remaining VM.
// It is not possible to edit the device_config after the SR is created and this data is only used at mount time when rebooting
// the hosts.
const backupservers = map(range(VM_FIRST_NUMBER, VM_FIRST_NUMBER + 10), ipLastByte => networkPrefix + ipLastByte).join(':')
const config = {server: ipAndHosts[0].address + ':/xosan', backupservers}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 5}
const xosanSrRef = await xapi.call('SR.create', firstSr.$PBDs[0].$host.$ref, config, 0, 'XOSAN', 'XOSAN',
'xosan', '', true, {})
debug('sr created')
// we just forget because the cleanup actions are stacked in the $onFailure system
$onFailure(() => xapi.forgetSr(xosanSrRef))
if (arbiter) {
@@ -391,12 +478,14 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
template: template,
network: xosanNetwork.$id,
type: glusterType,
networkPrefix,
redundancy
})
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 6}
debug('scanning new SR')
await xapi.call('SR.scan', xosanSrRef)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
})
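For the default range, the immutable `device_config` passed to `SR.create` above ends up looking like this sketch (the first VM usually gets `.101`, and the ten backup entries cover `.101` through `.110`):

```js
const config = {
  server: '172.31.100.101:/xosan',
  backupservers: [
    '172.31.100.101', '172.31.100.102', '172.31.100.103', '172.31.100.104',
    '172.31.100.105', '172.31.100.106', '172.31.100.107', '172.31.100.108',
    '172.31.100.109', '172.31.100.110'
  ].join(':')
}
```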
@@ -420,6 +509,12 @@ createSR.params = {
},
redundancy: {
type: 'number'
},
memorySize: {
type: 'number', optional: true
},
ipRange: {
type: 'string', optional: true
}
}
@@ -450,85 +545,118 @@ async function mountNewDisk (localEndpoint, hostname, newDeviceFile) {
}
async function replaceBrickOnSameVM (xosansr, previousBrick, newLvmSr, brickSize) {
const OPERATION_OBJECT = {
operation: 'replaceBrick',
states: ['creatingNewDisk', 'mountingDisk', 'swappingBrick', 'disconnectingOldDisk', 'scanningSr']
}
const xapi = this.getXapi(xosansr)
const poolId = xapi.pool.$id
try {
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
// TODO: a bit of user input validation on 'previousBrick', it's going to ssh
const previousIp = previousBrick.split(':')[0]
brickSize = brickSize === undefined ? Infinity : brickSize
const data = this::getXosanConfig(xosansr)
const nodes = data.nodes
const nodeIndex = nodes.findIndex(node => node.vm.ip === previousIp)
const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
const previousVM = _getIPToVMDict(xapi, xosansr)[previousBrick].vm
const newDeviceFile = await createNewDisk(xapi, newLvmSr, previousVM, brickSize)
const localEndpoint = {
xapi,
hosts: map(nodes, node => xapi.getObject(node.host)),
addresses: [previousIp]
}
const previousBrickRoot = previousBrick.split(':')[1].split('/').slice(0, 3).join('/')
const previousBrickDevice = (await remoteSsh(localEndpoint, `grep " ${previousBrickRoot} " /proc/mounts | cut -d ' ' -f 1 | sed 's_/dev/__'`)).stdout.trim()
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
const brickName = await mountNewDisk(localEndpoint, previousIp, newDeviceFile)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
await glusterCmd(glusterEndpoint, `volume replace-brick xosan ${previousBrick} ${brickName} commit force`)
nodes[nodeIndex].brickName = brickName
nodes[nodeIndex].underlyingSr = newLvmSr
await xapi.xo.setData(xosansr, 'xosan_config', data)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
await umountDisk(localEndpoint, previousBrickRoot)
const previousVBD = previousVM.$VBDs.find(vbd => vbd.device === previousBrickDevice)
await xapi.disconnectVbd(previousVBD)
await xapi.deleteVdi(previousVBD.VDI)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 4}
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
}
export async function replaceBrick ({xosansr, previousBrick, newLvmSr, brickSize, onSameVM = true}) {
const OPERATION_OBJECT = {
operation: 'replaceBrick',
states: ['insertingNewVm', 'swappingBrick', 'deletingVm', 'scanningSr']
}
if (onSameVM) {
return this::replaceBrickOnSameVM(xosansr, previousBrick, newLvmSr, brickSize)
}
const xapi = this.getXapi(xosansr)
const poolId = xapi.pool.$id
try {
// TODO: a bit of user input validation on 'previousBrick', it's going to ssh
const previousIp = previousBrick.split(':')[0]
brickSize = brickSize === undefined ? Infinity : brickSize
const data = getXosanConfig(xosansr, xapi)
const nodes = data.nodes
const newIpAddress = _findAFreeIPAddress(nodes, data.networkPrefix)
const nodeIndex = nodes.findIndex(node => node.vm.ip === previousIp)
const stayingNodes = filter(nodes, (node, index) => index !== nodeIndex)
const glusterEndpoint = {
xapi,
hosts: map(stayingNodes, node => xapi.getObject(node.host)),
addresses: map(stayingNodes, node => node.vm.ip)
}
const previousVMEntry = _getIPToVMDict(xapi, xosansr)[previousBrick]
const arbiter = nodes[nodeIndex].arbiter
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
let {newVM, addressAndHost} = await this::insertNewGlusterVm(xapi, xosansr, newLvmSr,
{labelSuffix: arbiter ? '_arbiter' : '', glusterEndpoint, newIpAddress, increaseDataDisk: !arbiter, brickSize})
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
await glusterCmd(glusterEndpoint, `volume replace-brick xosan ${previousBrick} ${addressAndHost.brickName} commit force`)
await glusterCmd(glusterEndpoint, 'peer detach ' + previousIp)
data.nodes.splice(nodeIndex, 1, {
brickName: addressAndHost.brickName,
host: addressAndHost.host.$id,
arbiter: arbiter,
vm: {ip: addressAndHost.address, id: newVM.$id},
underlyingSr: newLvmSr
})
await xapi.xo.setData(xosansr, 'xosan_config', data)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
if (previousVMEntry) {
await xapi.deleteVm(previousVMEntry.vm, true)
}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
}
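Both branches still carry a TODO about validating `previousBrick` before it reaches an ssh command line. A minimal validation sketch, not part of this commit, that only accepts `<ipv4>:/<path>` shaped brick names:

```js
// Hypothetical input guard; the regexp is an assumption about valid brick names.
const BRICK_RE = /^\d{1,3}(\.\d{1,3}){3}:\/[\w\/.-]+$/
function assertBrickName (brick) {
  if (!BRICK_RE.test(brick)) {
    throw new Error(`invalid brick name: ${brick}`)
  }
}
```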
replaceBrick.description = 'replaceBrick brick in gluster volume'
replaceBrick.permission = 'admin'
replaceBrick.params = {
xosansr: {type: 'string'},
previousBrick: {type: 'string'},
newLvmSr: {type: 'string'},
brickSize: {type: 'number'}
}
replaceBrick.resolve = {
xosansr: ['sr', 'SR', 'administrate']
}
async function _prepareGlusterVm (xapi, lvmSr, newVM, xosanNetwork, ipAddress, {
labelSuffix = '', increaseDataDisk = true,
maxDiskSize = Infinity, memorySize = 2 * GIGABYTE
}) {
const host = lvmSr.$PBDs[0].$host
const xenstoreData = {
'vm-data/hostname': 'XOSAN' + lvmSr.name_label + labelSuffix,
@@ -580,26 +708,25 @@ async function _prepareGlusterVm (xapi, lvmSr, newVM, xosanNetwork, ipAddress, {
const smallDiskSize = 1073741824
const deviceFile = await createNewDisk(xapi, lvmSr, newVM, increaseDataDisk ? newSize : smallDiskSize)
const brickName = await mountNewDisk(localEndpoint, ip, deviceFile)
return {address: ip, host, vm, underlyingSr: lvmSr, brickName}
}
async function _importGlusterVM (xapi, template, lvmsrId) {
const templateStream = await this.requestResource('xosan', template.id, template.version)
const newVM = await xapi.importVm(templateStream, {srId: lvmsrId, type: 'xva'})
await xapi.editVm(newVM, {
autoPoweron: true
})
return newVM
}
function _findAFreeIPAddress (nodes, networkPrefix) {
return _findIPAddressOutsideList(map(nodes, n => n.vm.ip), networkPrefix)
}
function _findIPAddressOutsideList (reservedList, networkPrefix, vmIpLastNumber = 101) {
for (let i = vmIpLastNumber; i < 255; i++) {
const candidate = networkPrefix + i
if (!reservedList.find(a => a === candidate)) {
return candidate
}
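`_findIPAddressOutsideList` simply walks the VM range of the prefix and returns the first unused address:

```js
// With VMs already at .101 and .102, the next free VM address is .103.
_findIPAddressOutsideList(['172.31.100.101', '172.31.100.102'], '172.31.100.')
// → '172.31.100.103'
```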
@@ -612,11 +739,13 @@ const _median = arr => {
return arr[Math.floor(arr.length / 2)]
}
const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xosansr, lvmsrId, {
labelSuffix = '',
glusterEndpoint = null, ipAddress = null, increaseDataDisk = true, brickSize = Infinity
}) {
const data = getXosanConfig(xosansr, xapi)
if (ipAddress === null) {
ipAddress = _findAFreeIPAddress(data.nodes, data.networkPrefix)
}
const vmsMemories = []
for (let node of data.nodes) {
@@ -631,33 +760,40 @@ const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xos
// we can't really copy an existing VM, because existing gluster VM disks might be too large to be copied.
const newVM = await this::_importGlusterVM(xapi, data.template, lvmsrId)
$onFailure(() => xapi.deleteVm(newVM, true))
const addressAndHost = await _prepareGlusterVm(xapi, srObject, newVM, xosanNetwork, ipAddress, {
labelSuffix,
increaseDataDisk,
maxDiskSize: brickSize,
memorySize: vmsMemories.length ? _median(vmsMemories) : 2 * GIGABYTE
})
if (!glusterEndpoint) {
glusterEndpoint = this::_getGlusterEndpoint(xosansr)
}
await _probePoolAndWaitForPresence(glusterEndpoint, [addressAndHost.address])
return {data, newVM, addressAndHost, glusterEndpoint}
})
export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, lvmsrs, brickSize}) {
const OPERATION_OBJECT = {
operation: 'addBricks',
states: ['insertingNewVms', 'addingBricks', 'scanningSr']
}
const xapi = this.getXapi(xosansr)
const poolId = xapi.pool.$id
if (CURRENT_POOL_OPERATIONS[poolId]) {
throw new Error('there is already a XOSAN operation running on this pool')
}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
try {
const data = getXosanConfig(xosansr, xapi)
const usedAddresses = map(data.nodes, n => n.vm.ip)
const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
const newAddresses = []
const newNodes = []
for (let newSr of lvmsrs) {
const ipAddress = _findIPAddressOutsideList(usedAddresses.concat(newAddresses), data.networkPrefix)
newAddresses.push(ipAddress)
const {newVM, addressAndHost} = await this::insertNewGlusterVm(xapi, xosansr, newSr, {ipAddress, brickSize})
$onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + ipAddress, true))
$onFailure(() => xapi.deleteVm(newVM, true))
const brickName = addressAndHost.brickName
@@ -673,24 +809,27 @@ export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, l
await glusterCmd(glusterEndpoint, 'peer detach ' + arbiterNode.vm.ip, true)
await xapi.deleteVm(arbiterNode.vm.id, true)
}
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
await glusterCmd(glusterEndpoint, `volume add-brick xosan ${newNodes.map(n => n.brickName).join(' ')}`)
data.nodes = data.nodes.concat(newNodes)
await xapi.xo.setData(xosansr, 'xosan_config', data)
CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
} finally {
delete CURRENT_POOL_OPERATIONS[poolId]
}
})
addBricks.description = 'add brick to XOSAN SR'
addBricks.permission = 'admin'
addBricks.params = {
xosansr: {type: 'string'},
lvmsrs: {
type: 'array',
items: {
type: 'string'
}
},
brickSize: {type: 'number'}
}
@@ -699,14 +838,14 @@ addBricks.resolve = {
lvmsrs: ['sr', 'SR', 'administrate']
}
export const removeBricks = defer.onFailure(async function ($onFailure, {xosansr, bricks}) {
const xapi = this.getXapi(xosansr)
if (CURRENT_POOL_OPERATIONS[xapi.pool.$id]) {
throw new Error('there is already a XOSAN operation running on this pool')
}
CURRENT_POOL_OPERATIONS[xapi.pool.$id] = true
try {
const data = getXosanConfig(xosansr, xapi)
// IPV6: splitting bricks on ':' will break with IPv6 addresses
const ips = map(bricks, b => b.split(':')[0])
const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
@@ -721,60 +860,61 @@ export const removeBricks = defer.onFailure(async function ($onFailure, { xosans
await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
await asyncMap(brickVMs, vm => xapi.deleteVm(vm.vm, true))
} finally {
delete CURRENT_POOL_OPERATIONS[xapi.pool.$id]
}
})
removeBricks.description = 'remove brick from XOSAN SR'
removeBricks.permission = 'admin'
removeBricks.params = {
xosansr: {type: 'string'},
bricks: {
type: 'array',
items: {type: 'string'}
}
}
export function checkSrCurrentState ({poolId}) {
return CURRENT_POOL_OPERATIONS[poolId]
}
checkSrCurrentState.description = 'checks if there is an operation currently running on the SR'
checkSrCurrentState.permission = 'admin'
checkSrCurrentState.params = {poolId: {type: 'string'}}
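The returned object is the `OPERATION_OBJECT` stored by the running method, so a client can turn `state` into a human-readable step (note that `removeBricks` still stores a bare `true`). A hypothetical polling snippet, method namespace assumed:

```js
const op = await xo.call('xosan.checkSrCurrentState', {poolId})
if (op && op.states) {
  console.log(`${op.operation}: ${op.states[op.state]}`)
}
```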
const POSSIBLE_CONFIGURATIONS = {}
POSSIBLE_CONFIGURATIONS[2] = [{layout: 'replica_arbiter', redundancy: 3, capacity: 1}]
POSSIBLE_CONFIGURATIONS[3] = [
{layout: 'disperse', redundancy: 1, capacity: 2},
{layout: 'replica', redundancy: 3, capacity: 1}]
POSSIBLE_CONFIGURATIONS[4] = [{layout: 'replica', redundancy: 2, capacity: 2}]
POSSIBLE_CONFIGURATIONS[5] = [{layout: 'disperse', redundancy: 1, capacity: 4}]
POSSIBLE_CONFIGURATIONS[6] = [
{layout: 'disperse', redundancy: 2, capacity: 4},
{layout: 'replica', redundancy: 2, capacity: 3},
{layout: 'replica', redundancy: 3, capacity: 2}]
POSSIBLE_CONFIGURATIONS[7] = [{layout: 'disperse', redundancy: 3, capacity: 4}]
POSSIBLE_CONFIGURATIONS[8] = [{layout: 'replica', redundancy: 2, capacity: 4}]
POSSIBLE_CONFIGURATIONS[9] = [
{layout: 'disperse', redundancy: 1, capacity: 8},
{layout: 'replica', redundancy: 3, capacity: 3}]
POSSIBLE_CONFIGURATIONS[10] = [
{layout: 'disperse', redundancy: 2, capacity: 8},
{layout: 'replica', redundancy: 2, capacity: 5}]
POSSIBLE_CONFIGURATIONS[11] = [{layout: 'disperse', redundancy: 3, capacity: 8}]
POSSIBLE_CONFIGURATIONS[12] = [
{layout: 'disperse', redundancy: 4, capacity: 8},
{layout: 'replica', redundancy: 2, capacity: 6}]
POSSIBLE_CONFIGURATIONS[13] = [{layout: 'disperse', redundancy: 5, capacity: 8}]
POSSIBLE_CONFIGURATIONS[14] = [
{layout: 'disperse', redundancy: 6, capacity: 8},
{layout: 'replica', redundancy: 2, capacity: 7}]
POSSIBLE_CONFIGURATIONS[15] = [
{layout: 'disperse', redundancy: 7, capacity: 8},
{layout: 'replica', redundancy: 3, capacity: 5}]
POSSIBLE_CONFIGURATIONS[16] = [{layout: 'replica', redundancy: 2, capacity: 8}]
export async function computeXosanPossibleOptions ({lvmSrs, brickSize = Infinity}) {
const count = lvmSrs.length
const configurations = POSSIBLE_CONFIGURATIONS[count]
if (!configurations) {
@@ -786,7 +926,7 @@ export async function computeXosanPossibleOptions ({ lvmSrs, brickSize = Infinit
const srSizes = map(srs, sr => sr.physical_size - sr.physical_utilisation)
const minSize = Math.min.apply(null, srSizes.concat(brickSize))
const finalBrickSize = Math.floor((minSize - XOSAN_VM_SYSTEM_DISK_SIZE) * XOSAN_DATA_DISK_USEAGE_RATIO)
return configurations.map(conf => ({...conf, availableSpace: Math.max(0, finalBrickSize * conf.capacity)}))
}
}
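The brick size math above, worked through for three SRs with about 1 TiB free each:

```js
// finalBrickSize = floor((minSize - system disk) * usage ratio)
const GIGABYTE = 1024 * 1024 * 1024
const minSize = 1024 * GIGABYTE
const finalBrickSize = Math.floor((minSize - 10 * GIGABYTE) * 0.99) // ≈ 1004 GiB
// 'disperse' (redundancy 1, capacity 2) → availableSpace ≈ 2008 GiB
// 'replica'  (redundancy 3, capacity 1) → availableSpace ≈ 1004 GiB
```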
@@ -804,7 +944,7 @@ computeXosanPossibleOptions.params = {
// ---------------------------------------------------------------------
export async function downloadAndInstallXosanPack ({id, version, pool}) {
if (!this.requestResource) {
throw new Error('requestResource is not a function')
}
@@ -821,9 +961,9 @@ export async function downloadAndInstallXosanPack ({ id, version, pool }) {
downloadAndInstallXosanPack.description = 'Register a resource via cloud plugin'
downloadAndInstallXosanPack.params = {
id: {type: 'string'},
version: {type: 'string'},
pool: {type: 'string'}
}
downloadAndInstallXosanPack.resolve = {