feat(xosan): beta 2+ (#601)
parent c996b61eae
commit 6b17e80e28
src/api/xosan.js (278 changed lines)

@@ -9,6 +9,7 @@ import {
  isArray,
  remove,
  filter,
  find,
  range
} from 'lodash'
import {
@@ -19,18 +20,30 @@ import {
const debug = createLogger('xo:xosan')

const SSH_KEY_FILE = 'id_rsa_xosan'
const NETWORK_PREFIX = '172.31.100.'
const DEFAULT_NETWORK_PREFIX = '172.31.100.'
const VM_FIRST_NUMBER = 101
const HOST_FIRST_NUMBER = 1
const GIGABYTE = 1024 * 1024 * 1024
const XOSAN_VM_SYSTEM_DISK_SIZE = 10 * GIGABYTE
const XOSAN_DATA_DISK_USEAGE_RATIO = 0.99
const XOSAN_MAX_DISK_SIZE = 2093050 * 1024 * 1024 // a bit under 2To

const CURRENTLY_CREATING_SRS = {}
const CURRENT_POOL_OPERATIONS = {}

function getXosanConfig (xosansr, xapi = this.getXapi(xosansr)) {
  const data = xapi.xo.getData(xosansr, 'xosan_config')
  if (data && data.networkPrefix === undefined) {
    // some xosan might have been created before this field was added
    data.networkPrefix = DEFAULT_NETWORK_PREFIX
    // fire and forget
    xapi.xo.setData(xosansr, 'xosan_config', data)
  }
  return data
}
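
A minimal sketch of the migration above, assuming a pool whose xosan_config was written before this field existed; the object shape is illustrative only:

// hypothetical config saved by an older XOSAN beta, before networkPrefix existed
const legacyConfig = {nodes: [], network: 'OpaqueRef:…'}
// after getXosanConfig() has run once, the missing field is back-filled with the default:
// legacyConfig.networkPrefix === '172.31.100.'  // DEFAULT_NETWORK_PREFIX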

function _getIPToVMDict (xapi, sr) {
  const dict = {}
  const data = xapi.xo.getData(sr, 'xosan_config')
  const data = getXosanConfig(sr, xapi)
  if (data && data.nodes) {
    data.nodes.forEach(conf => {
      try {
@@ -45,11 +58,16 @@ function _getIPToVMDict (xapi, sr) {

function _getGlusterEndpoint (sr) {
  const xapi = this.getXapi(sr)
  const data = xapi.xo.getData(sr, 'xosan_config')
  const data = getXosanConfig(sr, xapi)
  if (!data || !data.nodes) {
    return null
  }
  return { xapi, data: data, hosts: map(data.nodes, node => xapi.getObject(node.host)), addresses: map(data.nodes, node => node.vm.ip) }
  return {
    xapi,
    data: data,
    hosts: map(data.nodes, node => xapi.getObject(node.host)),
    addresses: map(data.nodes, node => node.vm.ip)
  }
}

async function rateLimitedRetry (action, shouldRetry, retryCount = 20) {
@@ -98,23 +116,36 @@ export async function getVolumeInfo ({ sr, infoType }) {
    return {commandStatus: true, result: volume}
  }

  function sshInfoType (command, handler) {
    return async () => {
      const cmdShouldRetry = result => !result['commandStatus'] && result.parsed && result.parsed['cliOutput']['opErrno'] === '30802'
      const runCmd = async () => glusterCmd(glusterEndpoint, 'volume ' + command, true)
      let commandResult = await rateLimitedRetry(runCmd, cmdShouldRetry)
      return commandResult['commandStatus'] ? handler(commandResult.parsed['cliOutput']) : commandResult
    }
  }

  function checkHosts () {
    const xapi = this.getXapi(sr)
    const data = getXosanConfig(sr, xapi)
    const network = xapi.getObject(data.network)
    const badPifs = filter(network.$PIFs, pif => pif.ip_configuration_mode !== 'Static')
    return badPifs.map(pif => ({pif, host: pif.$host.$id}))
  }

  const infoTypes = {
    heal: {command: 'heal xosan info', handler: parseHeal},
    status: {command: 'status xosan', handler: parseStatus},
    statusDetail: {command: 'status xosan detail', handler: parseStatus},
    statusMem: {command: 'status xosan mem', handler: parseStatus},
    info: {command: 'info xosan', handler: parseInfo}
    heal: sshInfoType('heal xosan info', parseHeal),
    status: sshInfoType('status xosan', parseStatus),
    statusDetail: sshInfoType('status xosan detail', parseStatus),
    statusMem: sshInfoType('status xosan mem', parseStatus),
    info: sshInfoType('info xosan', parseInfo),
    hosts: this::checkHosts
  }
  const foundType = infoTypes[infoType]
  if (!foundType) {
    throw new Error('getVolumeInfo(): "' + infoType + '" is an invalid type')
  }

  const cmdShouldRetry =
    result => !result['commandStatus'] && result.parsed && result.parsed['cliOutput']['opErrno'] === '30802'
  const runCmd = async () => glusterCmd(glusterEndpoint, 'volume ' + foundType.command, true)
  let commandResult = await rateLimitedRetry(runCmd, cmdShouldRetry)
  return commandResult['commandStatus'] ? foundType.handler(commandResult.parsed['cliOutput']) : commandResult
  return foundType()
}

getVolumeInfo.description = 'info on gluster volume'
@@ -131,6 +162,45 @@ getVolumeInfo.params = {
getVolumeInfo.resolve = {
  sr: ['sr', 'SR', 'administrate']
}
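
Each entry in infoTypes is now a ready-to-call function: sshInfoType wraps a `gluster volume …` query run over SSH (retried while gluster reports opErrno 30802), while hosts inspects the PIFs of the XOSAN network. A hedged sketch of how the method might be invoked, assuming the usual xo-cli syntax; the SR UUID is a placeholder:

// xo-cli xosan.getVolumeInfo sr=<sr-uuid> infoType=heal   -> parsed output of 'gluster volume heal xosan info'
// xo-cli xosan.getVolumeInfo sr=<sr-uuid> infoType=hosts  -> [{pif, host}] for hosts whose PIF is not statically configured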

function reconfigurePifIP (xapi, pif, newIP) {
  xapi.call('PIF.reconfigure_ip', pif.$ref, 'Static', newIP, '255.255.255.0', '', '')
}

// this function should probably become fixSomething(thingToFix, parmas)
export async function fixHostNotInNetwork ({xosanSr, host}) {
  const xapi = this.getXapi(xosanSr)
  const data = getXosanConfig(xosanSr, xapi)
  const network = xapi.getObject(data.network)
  const usedAddresses = network.$PIFs.filter(pif => pif.ip_configuration_mode === 'Static').map(pif => pif.IP)
  const pif = network.$PIFs.find(pif => pif.ip_configuration_mode !== 'Static' && pif.$host.$id === host)
  if (pif) {
    const newIP = _findIPAddressOutsideList(usedAddresses, HOST_FIRST_NUMBER)
    reconfigurePifIP(xapi, pif, newIP)
    await xapi.call('PIF.plug', pif.$ref)
    const PBD = find(xosanSr.$PBDs, pbd => pbd.$host.$id === host)
    if (PBD) {
      await xapi.call('PBD.plug', PBD.$ref)
    }
    debug('host connected !')
  }
}

fixHostNotInNetwork.description = 'put host in xosan network'
fixHostNotInNetwork.permission = 'admin'

fixHostNotInNetwork.params = {
  xosanSr: {
    type: 'string'
  },
  host: {
    type: 'string'
  }
}
fixHostNotInNetwork.resolve = {
  sr: ['sr', 'SR', 'administrate']
}

function floor2048 (value) {
  return 2048 * Math.floor(value / 2048)
}
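
floor2048 rounds a byte count down to a multiple of 2048, presumably to keep disk sizes aligned; a quick worked example:

// floor2048(10000) === 8192  (4 * 2048)
// floor2048(2048)  === 2048
// floor2048(2047)  === 0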

@@ -210,8 +280,8 @@ async function glusterCmd (glusterEndpoint, cmd, ignoreError = false) {
  return result
}

const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure, xapi, pif, vlan) {
  let hostIpLastNumber = 1
const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure, xapi, pif, vlan, networkPrefix) {
  let hostIpLastNumber = HOST_FIRST_NUMBER
  const xosanNetwork = await xapi.createNetwork({
    name: 'XOSAN network',
    description: 'XOSAN network',
@@ -220,8 +290,7 @@ const createNetworkAndInsertHosts = defer.onFailure(async function ($onFailure,
    vlan: +vlan
  })
  $onFailure(() => xapi.deleteNetwork(xosanNetwork))
  await Promise.all(xosanNetwork.$PIFs.map(pif => xapi.call('PIF.reconfigure_ip', pif.$ref, 'Static',
    NETWORK_PREFIX + (hostIpLastNumber++), '255.255.255.0', NETWORK_PREFIX + '1', '')))
  await Promise.all(xosanNetwork.$PIFs.map(pif => reconfigurePifIP(xapi, pif, networkPrefix + (hostIpLastNumber++))))
  return xosanNetwork
})

@@ -253,6 +322,7 @@ const _probePoolAndWaitForPresence = defer.onFailure(async function ($onFailure,
    await glusterCmd(glusterEndpoint, 'peer probe ' + address)
    $onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + address, true))
  })

  function shouldRetry (peers) {
    for (let peer of peers) {
      if (peer.state === '4') {
@@ -309,8 +379,15 @@ async function configureGluster (redundancy, ipAndHosts, glusterEndpoint, gluste
  await glusterCmd(glusterEndpoint, 'volume start xosan')
}

export const createSR = defer.onFailure(async function ($onFailure, { template, pif, vlan, srs, glusterType,
  redundancy, brickSize, memorySize }) {
export const createSR = defer.onFailure(async function ($onFailure, {
  template, pif, vlan, srs, glusterType,
  redundancy, brickSize, memorySize = 2 * GIGABYTE, ipRange = DEFAULT_NETWORK_PREFIX + '.0'
}) {
  const OPERATION_OBJECT = {
    operation: 'createSr',
    states: ['configuringNetwork', 'importingVm', 'copyingVms',
      'configuringVms', 'configuringGluster', 'creatingSr', 'scanningSr']
  }
  if (!this.requestResource) {
    throw new Error('requestResource is not a function')
  }
@@ -318,16 +395,18 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
  if (srs.length < 1) {
    return // TODO: throw an error
  }

  // '172.31.100.0' -> '172.31.100.'
  const networkPrefix = ipRange.split('.').slice(0, 3).join('.') + '.'
  let vmIpLastNumber = VM_FIRST_NUMBER
  const xapi = this.getXapi(srs[0])
  if (CURRENTLY_CREATING_SRS[xapi.pool.$id]) {
  const poolId = xapi.pool.$id
  if (CURRENT_POOL_OPERATIONS[poolId]) {
    throw new Error('createSR is already running for this pool')
  }

  CURRENTLY_CREATING_SRS[xapi.pool.$id] = true
  CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
  try {
    const xosanNetwork = await createNetworkAndInsertHosts(xapi, pif, vlan)
    const xosanNetwork = await createNetworkAndInsertHosts(xapi, pif, vlan, networkPrefix)
    $onFailure(() => xapi.deleteNetwork(xosanNetwork))
    const sshKey = await getOrCreateSshKey(xapi)
    const srsObjects = map(srs, srId => xapi.getObject(srId))
@@ -338,8 +417,10 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
    })))

    const firstSr = srsObjects[0]
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
    const firstVM = await this::_importGlusterVM(xapi, template, firstSr)
    $onFailure(() => xapi.deleteVm(firstVM, true))
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
    const copiedVms = await asyncMap(srsObjects.slice(1), sr =>
      copyVm(xapi, firstVM, sr)::tap(({vm}) =>
        $onFailure(() => xapi.deleteVm(vm))
@@ -350,29 +431,35 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
      sr: firstSr
    }].concat(copiedVms)
    let arbiter = null
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
    if (srs.length === 2) {
      const sr = firstSr
      const arbiterIP = NETWORK_PREFIX + (vmIpLastNumber++)
      const arbiterIP = networkPrefix + (vmIpLastNumber++)
      const arbiterVm = await xapi.copyVm(firstVM, sr)
      $onFailure(() => xapi.deleteVm(arbiterVm, true))
      arbiter = await _prepareGlusterVm(xapi, sr, arbiterVm, xosanNetwork, arbiterIP, {labelSuffix: '_arbiter',
      arbiter = await _prepareGlusterVm(xapi, sr, arbiterVm, xosanNetwork, arbiterIP, {
        labelSuffix: '_arbiter',
        increaseDataDisk: false,
        memorySize})
        memorySize
      })
      arbiter.arbiter = true
    }
    const ipAndHosts = await asyncMap(vmsAndSrs, vmAndSr => _prepareGlusterVm(xapi, vmAndSr.sr, vmAndSr.vm, xosanNetwork,
      NETWORK_PREFIX + (vmIpLastNumber++), {maxDiskSize: brickSize, memorySize}))
      networkPrefix + (vmIpLastNumber++), {maxDiskSize: brickSize, memorySize}))
    const glusterEndpoint = {xapi, hosts: map(ipAndHosts, ih => ih.host), addresses: map(ipAndHosts, ih => ih.address)}
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 4}
    await configureGluster(redundancy, ipAndHosts, glusterEndpoint, glusterType, arbiter)
    debug('xosan gluster volume started')
    // We use 10 IPs of the gluster VM range as backup, in the hope that even if the first VM gets destroyed we find at least
    // one VM to give mount the volfile.
    // It is not possible to edit the device_config after the SR is created and this data is only used at mount time when rebooting
    // the hosts.
    const backupservers = map(range(VM_FIRST_NUMBER, VM_FIRST_NUMBER + 10), ipLastByte => NETWORK_PREFIX + ipLastByte).join(':')
    const backupservers = map(range(VM_FIRST_NUMBER, VM_FIRST_NUMBER + 10), ipLastByte => networkPrefix + ipLastByte).join(':')
    const config = {server: ipAndHosts[0].address + ':/xosan', backupservers}
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 5}
    const xosanSrRef = await xapi.call('SR.create', firstSr.$PBDs[0].$host.$ref, config, 0, 'XOSAN', 'XOSAN',
      'xosan', '', true, {})
    debug('sr created')
    // we just forget because the cleanup actions are stacked in the $onFailure system
    $onFailure(() => xapi.forgetSr(xosanSrRef))
    if (arbiter) {
@@ -391,12 +478,14 @@ export const createSR = defer.onFailure(async function ($onFailure, { template,
      template: template,
      network: xosanNetwork.$id,
      type: glusterType,
      networkPrefix,
      redundancy
    })
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 6}
    debug('scanning new SR')
    await xapi.call('SR.scan', xosanSrRef)
  } finally {
    delete CURRENTLY_CREATING_SRS[xapi.pool.$id]
    delete CURRENT_POOL_OPERATIONS[poolId]
  }
})

@@ -420,6 +509,12 @@ createSR.params = {
  },
  redundancy: {
    type: 'number'
  },
  memorySize: {
    type: 'number', optional: true
  },
  ipRange: {
    type: 'string', optional: true
  }
}
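
A hedged sketch of a createSR call using the two new optional parameters (memorySize defaults to 2 * GIGABYTE, ipRange to a DEFAULT_NETWORK_PREFIX-based value); all values below are placeholders:

// xo-cli xosan.createSR template=json:'{…}' pif=<pif-uuid> vlan=0 \
//   srs=json:'["<sr1-uuid>","<sr2-uuid>","<sr3-uuid>"]' glusterType=replica redundancy=3 \
//   memorySize=4294967296 ipRange=10.0.42.0
// ipRange=10.0.42.0 makes the plugin derive networkPrefix '10.0.42.', so host PIFs get
// 10.0.42.1, 10.0.42.2, … (HOST_FIRST_NUMBER) and gluster VMs 10.0.42.101, … (VM_FIRST_NUMBER)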

@@ -450,11 +545,19 @@ async function mountNewDisk (localEndpoint, hostname, newDeviceFiledeviceFile) {
}

async function replaceBrickOnSameVM (xosansr, previousBrick, newLvmSr, brickSize) {
  const OPERATION_OBJECT = {
    operation: 'replaceBrick',
    states: ['creatingNewDisk', 'mountingDisk', 'swappingBrick', 'disconnectingOldDisk', 'scanningSr']
  }
  const xapi = this.getXapi(xosansr)
  const poolId = xapi.pool.$id
  try {
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}

    // TODO: a bit of user input validation on 'previousBrick', it's going to ssh
    const previousIp = previousBrick.split(':')[0]
    brickSize = brickSize === undefined ? Infinity : brickSize
    const xapi = this.getXapi(xosansr)
    const data = xapi.xo.getData(xosansr, 'xosan_config')
    const data = this::getXosanConfig(xosansr)
    const nodes = data.nodes
    const nodeIndex = nodes.findIndex(node => node.vm.ip === previousIp)
    const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
@@ -467,37 +570,55 @@ async function replaceBrickOnSameVM (xosansr, previousBrick, newLvmSr, brickSize
    }
    const previousBrickRoot = previousBrick.split(':')[1].split('/').slice(0, 3).join('/')
    const previousBrickDevice = (await remoteSsh(localEndpoint, `grep " ${previousBrickRoot} " /proc/mounts | cut -d ' ' -f 1 | sed 's_/dev/__'`)).stdout.trim()
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
    const brickName = await mountNewDisk(localEndpoint, previousIp, newDeviceFile)
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
    await glusterCmd(glusterEndpoint, `volume replace-brick xosan ${previousBrick} ${brickName} commit force`)
    nodes[nodeIndex].brickName = brickName
    nodes[nodeIndex].underlyingSr = newLvmSr
    await xapi.xo.setData(xosansr, 'xosan_config', data)
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
    await umountDisk(localEndpoint, previousBrickRoot)
    const previousVBD = previousVM.$VBDs.find(vbd => vbd.device === previousBrickDevice)
    await xapi.disconnectVbd(previousVBD)
    await xapi.deleteVdi(previousVBD.VDI)
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 4}
    await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
  } finally {
    delete CURRENT_POOL_OPERATIONS[poolId]
  }
}

export async function replaceBrick ({xosansr, previousBrick, newLvmSr, brickSize, onSameVM = true}) {
  const OPERATION_OBJECT = {
    operation: 'replaceBrick',
    states: ['insertingNewVm', 'swapingBrick', 'deletingVm', 'scanningSr']
  }
  if (onSameVM) {
    return this::replaceBrickOnSameVM(xosansr, previousBrick, newLvmSr, brickSize)
  }
  const xapi = this.getXapi(xosansr)
  const poolId = xapi.pool.$id
  try {
    // TODO: a bit of user input validation on 'previousBrick', it's going to ssh
    const previousIp = previousBrick.split(':')[0]
    brickSize = brickSize === undefined ? Infinity : brickSize
    const xapi = this.getXapi(xosansr)
    const nodes = xapi.xo.getData(xosansr, 'xosan_config').nodes
    const newIpAddress = _findAFreeIPAddress(nodes)
    const data = getXosanConfig(xosansr, xapi)
    const nodes = data.nodes
    const newIpAddress = _findAFreeIPAddress(nodes, data.networkPrefix)
    const nodeIndex = nodes.findIndex(node => node.vm.ip === previousIp)
    const stayingNodes = filter(nodes, (node, index) => index !== nodeIndex)
    const glusterEndpoint = { xapi,
    const glusterEndpoint = {
      xapi,
      hosts: map(stayingNodes, node => xapi.getObject(node.host)),
      addresses: map(stayingNodes, node => node.vm.ip) }
      addresses: map(stayingNodes, node => node.vm.ip)
    }
    const previousVMEntry = _getIPToVMDict(xapi, xosansr)[previousBrick]
    const arbiter = nodes[nodeIndex].arbiter
    let { data, newVM, addressAndHost } = await this::insertNewGlusterVm(xapi, xosansr, newLvmSr,
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
    let {newVM, addressAndHost} = await this::insertNewGlusterVm(xapi, xosansr, newLvmSr,
      {labelSuffix: arbiter ? '_arbiter' : '', glusterEndpoint, newIpAddress, increaseDataDisk: !arbiter, brickSize})
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
    await glusterCmd(glusterEndpoint, `volume replace-brick xosan ${previousBrick} ${addressAndHost.brickName} commit force`)
    await glusterCmd(glusterEndpoint, 'peer detach ' + previousIp)
    data.nodes.splice(nodeIndex, 1, {
@@ -508,10 +629,15 @@ export async function replaceBrick ({ xosansr, previousBrick, newLvmSr, brickSiz
      underlyingSr: newLvmSr
    })
    await xapi.xo.setData(xosansr, 'xosan_config', data)
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
    if (previousVMEntry) {
      await xapi.deleteVm(previousVMEntry.vm, true)
    }
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 3}
    await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
  } finally {
    delete CURRENT_POOL_OPERATIONS[poolId]
  }
}

replaceBrick.description = 'replaceBrick brick in gluster volume'
@@ -527,8 +653,10 @@ replaceBrick.resolve = {
  xosansr: ['sr', 'SR', 'administrate']
}

async function _prepareGlusterVm (xapi, lvmSr, newVM, xosanNetwork, ipAddress, {labelSuffix = '', increaseDataDisk = true,
  maxDiskSize = Infinity, memorySize = 2 * GIGABYTE}) {
async function _prepareGlusterVm (xapi, lvmSr, newVM, xosanNetwork, ipAddress, {
  labelSuffix = '', increaseDataDisk = true,
  maxDiskSize = Infinity, memorySize = 2 * GIGABYTE
}) {
  const host = lvmSr.$PBDs[0].$host
  const xenstoreData = {
    'vm-data/hostname': 'XOSAN' + lvmSr.name_label + labelSuffix,
@@ -592,14 +720,13 @@ async function _importGlusterVM (xapi, template, lvmsrId) {
  return newVM
}

function _findAFreeIPAddress (nodes) {
  return _findIPAddressOutsideList(map(nodes, n => n.vm.ip))
function _findAFreeIPAddress (nodes, networkPrefix) {
  return _findIPAddressOutsideList(map(nodes, n => n.vm.ip), networkPrefix)
}

function _findIPAddressOutsideList (reservedList) {
  const vmIpLastNumber = 101
function _findIPAddressOutsideList (reservedList, networkPrefix, vmIpLastNumber = 101) {
  for (let i = vmIpLastNumber; i < 255; i++) {
    const candidate = NETWORK_PREFIX + i
    const candidate = networkPrefix + i
    if (!reservedList.find(a => a === candidate)) {
      return candidate
    }
@@ -612,11 +739,13 @@ const _median = arr => {
  return arr[Math.floor(arr.length / 2)]
}

const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xosansr, lvmsrId, {labelSuffix = '',
  glusterEndpoint = null, ipAddress = null, increaseDataDisk = true, brickSize = Infinity}) {
  const data = xapi.xo.getData(xosansr, 'xosan_config')
const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xosansr, lvmsrId, {
  labelSuffix = '',
  glusterEndpoint = null, ipAddress = null, increaseDataDisk = true, brickSize = Infinity
}) {
  const data = getXosanConfig(xosansr, xapi)
  if (ipAddress === null) {
    ipAddress = _findAFreeIPAddress(data.nodes)
    ipAddress = _findAFreeIPAddress(data.nodes, data.networkPrefix)
  }
  const vmsMemories = []
  for (let node of data.nodes) {
@@ -631,10 +760,12 @@ const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xos
  // can't really copy an existing VM, because existing gluster VMs disks might too large to be copied.
  const newVM = await this::_importGlusterVM(xapi, data.template, lvmsrId)
  $onFailure(() => xapi.deleteVm(newVM, true))
  const addressAndHost = await _prepareGlusterVm(xapi, srObject, newVM, xosanNetwork, ipAddress, {labelSuffix,
  const addressAndHost = await _prepareGlusterVm(xapi, srObject, newVM, xosanNetwork, ipAddress, {
    labelSuffix,
    increaseDataDisk,
    maxDiskSize: brickSize,
    memorySize: vmsMemories.length ? _median(vmsMemories) : 2 * GIGABYTE})
    memorySize: vmsMemories.length ? _median(vmsMemories) : 2 * GIGABYTE
  })
  if (!glusterEndpoint) {
    glusterEndpoint = this::_getGlusterEndpoint(xosansr)
  }
@@ -643,19 +774,24 @@ const insertNewGlusterVm = defer.onFailure(async function ($onFailure, xapi, xos
})

export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, lvmsrs, brickSize}) {
  const OPERATION_OBJECT = {
    operation: 'addBricks',
    states: ['insertingNewVms', 'addingBricks', 'scanningSr']
  }
  const xapi = this.getXapi(xosansr)
  if (CURRENTLY_CREATING_SRS[xapi.pool.$id]) {
  const poolId = xapi.pool.$id
  if (CURRENT_POOL_OPERATIONS[poolId]) {
    throw new Error('createSR is already running for this pool')
  }
  CURRENTLY_CREATING_SRS[xapi.pool.$id] = true
  CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 0}
  try {
    const data = xapi.xo.getData(xosansr, 'xosan_config')
    const data = getXosanConfig(xosansr, xapi)
    const usedAddresses = map(data.nodes, n => n.vm.ip)
    const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
    const newAddresses = []
    const newNodes = []
    for (let newSr of lvmsrs) {
      const ipAddress = _findIPAddressOutsideList(usedAddresses.concat(newAddresses))
      const ipAddress = _findIPAddressOutsideList(usedAddresses.concat(newAddresses), data.networkPrefix)
      newAddresses.push(ipAddress)
      const {newVM, addressAndHost} = await this::insertNewGlusterVm(xapi, xosansr, newSr, {ipAddress, brickSize})
      $onFailure(() => glusterCmd(glusterEndpoint, 'peer detach ' + ipAddress, true))
@@ -673,12 +809,14 @@ export const addBricks = defer.onFailure(async function ($onFailure, {xosansr, l
      await glusterCmd(glusterEndpoint, 'peer detach ' + arbiterNode.vm.ip, true)
      await xapi.deleteVm(arbiterNode.vm.id, true)
    }
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 1}
    await glusterCmd(glusterEndpoint, `volume add-brick xosan ${newNodes.map(n => n.brickName).join(' ')}`)
    data.nodes = data.nodes.concat(newNodes)
    await xapi.xo.setData(xosansr, 'xosan_config', data)
    CURRENT_POOL_OPERATIONS[poolId] = {...OPERATION_OBJECT, state: 2}
    await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
  } finally {
    delete CURRENTLY_CREATING_SRS[xapi.pool.$id]
    delete CURRENT_POOL_OPERATIONS[poolId]
  }
})

@@ -690,7 +828,8 @@ addBricks.params = {
    type: 'array',
    items: {
      type: 'string'
    } },
    }
  },
  brickSize: {type: 'number'}
}

@@ -701,12 +840,12 @@ addBricks.resolve = {

export const removeBricks = defer.onFailure(async function ($onFailure, {xosansr, bricks}) {
  const xapi = this.getXapi(xosansr)
  if (CURRENTLY_CREATING_SRS[xapi.pool.$id]) {
  if (CURRENT_POOL_OPERATIONS[xapi.pool.$id]) {
    throw new Error('this there is already a XOSAN operation running on this pool')
  }
  CURRENTLY_CREATING_SRS[xapi.pool.$id] = true
  CURRENT_POOL_OPERATIONS[xapi.pool.$id] = true
  try {
    const data = xapi.xo.getData(xosansr, 'xosan_config')
    const data = getXosanConfig(xosansr, xapi)
    // IPV6
    const ips = map(bricks, b => b.split(':')[0])
    const glusterEndpoint = this::_getGlusterEndpoint(xosansr)
@@ -721,7 +860,7 @@ export const removeBricks = defer.onFailure(async function ($onFailure, { xosans
    await xapi.call('SR.scan', xapi.getObject(xosansr).$ref)
    await asyncMap(brickVMs, vm => xapi.deleteVm(vm.vm, true))
  } finally {
    delete CURRENTLY_CREATING_SRS[xapi.pool.$id]
    delete CURRENT_POOL_OPERATIONS[xapi.pool.$id]
  }
})

@@ -735,12 +874,13 @@ removeBricks.params = {
  }
}

export function checkSrIsBusy ({ poolId }) {
  return !!CURRENTLY_CREATING_SRS[poolId]
export function checkSrCurrentState ({poolId}) {
  return CURRENT_POOL_OPERATIONS[poolId]
}
checkSrIsBusy.description = 'checks if there is a xosan SR curently being created on the given pool id'
checkSrIsBusy.permission = 'admin'
checkSrIsBusy.params = { poolId: { type: 'string' } }

checkSrCurrentState.description = 'checks if there is an operation currently running on the SR'
checkSrCurrentState.permission = 'admin'
checkSrCurrentState.params = {poolId: {type: 'string'}}
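
checkSrCurrentState replaces the boolean checkSrIsBusy: it returns whatever is stored in CURRENT_POOL_OPERATIONS for the pool (undefined when idle); for createSr, addBricks and replaceBrick that is a progress descriptor a client can poll. A sketch of a possible return value while a createSR is copying VMs; the shape comes from OPERATION_OBJECT above:

// { operation: 'createSr',
//   states: ['configuringNetwork', 'importingVm', 'copyingVms',
//     'configuringVms', 'configuringGluster', 'creatingSr', 'scanningSr'],
//   state: 2 }  // index into states, i.e. currently 'copyingVms'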

const POSSIBLE_CONFIGURATIONS = {}
POSSIBLE_CONFIGURATIONS[2] = [{layout: 'replica_arbiter', redundancy: 3, capacity: 1}]
@@ -786,7 +926,7 @@ export async function computeXosanPossibleOptions ({ lvmSrs, brickSize = Infinit
    const srSizes = map(srs, sr => sr.physical_size - sr.physical_utilisation)
    const minSize = Math.min.apply(null, srSizes.concat(brickSize))
    const finalBrickSize = Math.floor((minSize - XOSAN_VM_SYSTEM_DISK_SIZE) * XOSAN_DATA_DISK_USEAGE_RATIO)
    return configurations.map(conf => ({ ...conf, availableSpace: finalBrickSize * conf.capacity }))
    return configurations.map(conf => ({...conf, availableSpace: Math.max(0, finalBrickSize * conf.capacity)}))
  }
}
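
A worked example of the sizing arithmetic above, using the constants from the top of the file (XOSAN_VM_SYSTEM_DISK_SIZE = 10 GiB, XOSAN_DATA_DISK_USEAGE_RATIO = 0.99); the SR sizes are made up:

// smallest SR has 100 GiB free and no brickSize cap:
// minSize        = 100 GiB
// finalBrickSize = floor((100 GiB - 10 GiB) * 0.99) ≈ 89.1 GiB
// a configuration with capacity 2 then reports availableSpace ≈ 178.2 GiB
// the new Math.max(0, …) clamps the result to 0 when an SR is smaller than the 10 GiB system disk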