Mirror of https://github.com/libvirt/libvirt.git
snapshot: rudimentary qemu support for atomic disk snapshot
Taking an external snapshot of just one disk is atomic, without having to pause and resume the VM. This also paves the way for later patches to interact with the new qemu 'transaction' monitor command.

The various scenarios when requesting atomic are:
online, 1 disk, old qemu - safe, allowed by this patch
online, more than 1 disk, old qemu - failure, this patch
offline snapshot - safe, once a future patch implements offline disk snapshot
online, 1 or more disks, new qemu - safe, once future patch uses transaction

Taking an online system checkpoint snapshot is atomic, since it is done via a single 'savevm' monitor command. Taking an offline system checkpoint snapshot is atomic, thanks to the previous patch.

* src/qemu/qemu_driver.c (qemuDomainSnapshotCreateXML): Support new
flag for single-disk setups.
(qemuDomainSnapshotDiskPrepare): Check for atomic here.
(qemuDomainSnapshotCreateDiskActive): Skip pausing the VM when
atomic supported.
(qemuDomainSnapshotIsAllowed): Use bool instead of int.
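For orientation (not part of this commit): a minimal client-side sketch of how the new behavior is meant to be consumed through the public API. With VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY plus VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC, the request either completes as a single atomic step or fails cleanly up front (for example, a live multi-disk snapshot on a qemu without 'transaction' support is rejected by this patch). The connection URI, domain name, and snapshot XML below are illustrative placeholders, not taken from the commit.

/* Hedged usage sketch: request an atomic, disk-only external snapshot
 * through the public libvirt API.  "example-vm" and the snapshot XML
 * are placeholders. */
#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");
    if (!conn)
        return 1;

    virDomainPtr dom = virDomainLookupByName(conn, "example-vm");
    if (!dom) {
        virConnectClose(conn);
        return 1;
    }

    const char *xml =
        "<domainsnapshot>\n"
        "  <name>disk-snap-1</name>\n"
        "</domainsnapshot>";

    /* ATOMIC asks libvirt to either take the snapshot as one atomic
     * step or fail without partial effects; this patch rejects atomic
     * live snapshots of multiple disks on a qemu lacking 'transaction'. */
    unsigned int flags = VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                         VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC;

    int ret = 1;
    virDomainSnapshotPtr snap = virDomainSnapshotCreateXML(dom, xml, flags);
    if (snap) {
        virDomainSnapshotFree(snap);
        ret = 0;
    } else {
        fprintf(stderr, "atomic disk-only snapshot was refused\n");
    }

    virDomainFree(dom);
    virConnectClose(conn);
    return ret;
}

Roughly the same request can be made from the shell with `virsh snapshot-create-as example-vm disk-snap-1 --disk-only --atomic`, assuming a virsh build that exposes the --atomic option.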
commit 4c4cc1b96d
parent 922d498e1c
src/qemu/qemu_driver.c

@@ -9536,7 +9536,8 @@ cleanup:
     return ret;
 }
 
-static int qemuDomainSnapshotIsAllowed(virDomainObjPtr vm)
+static bool
+qemuDomainSnapshotIsAllowed(virDomainObjPtr vm)
 {
     int i;
 
@@ -9551,11 +9552,11 @@ static int qemuDomainSnapshotIsAllowed(virDomainObjPtr vm)
             qemuReportError(VIR_ERR_OPERATION_INVALID,
                             _("Disk '%s' does not support snapshotting"),
                             vm->def->disks[i]->src);
-            return 0;
+            return false;
         }
     }
 
-    return 1;
+    return true;
 }
 
 static int
@@ -9713,13 +9714,17 @@ endjob:
 
 static int
 qemuDomainSnapshotDiskPrepare(virDomainObjPtr vm, virDomainSnapshotDefPtr def,
-                              bool allow_reuse)
+                              unsigned int *flags)
 {
     int ret = -1;
     int i;
     bool found = false;
     bool active = virDomainObjIsActive(vm);
     struct stat st;
+    bool allow_reuse = (*flags & VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) != 0;
+    bool atomic = (*flags & VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC) != 0;
+    int external = 0;
+    qemuDomainObjPrivatePtr priv = vm->privateData;
 
     for (i = 0; i < def->ndisks; i++) {
         virDomainSnapshotDiskDefPtr disk = &def->disks[i];
@@ -9774,6 +9779,7 @@ qemuDomainSnapshotDiskPrepare(virDomainObjPtr vm, virDomainSnapshotDefPtr def,
                 goto cleanup;
             }
             found = true;
+            external++;
             break;
 
         case VIR_DOMAIN_DISK_SNAPSHOT_NO:
@@ -9793,6 +9799,17 @@ qemuDomainSnapshotDiskPrepare(virDomainObjPtr vm, virDomainSnapshotDefPtr def,
                           "selected for snapshot"));
         goto cleanup;
     }
+    if (active) {
+        if (external == 1 ||
+            qemuCapsGet(priv->qemuCaps, QEMU_CAPS_TRANSACTION)) {
+            *flags |= VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC;
+        } else if (atomic && external > 1) {
+            qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+                            _("atomic live snapshot of multiple disks "
+                              "is unsupported"));
+            goto cleanup;
+        }
+    }
 
     ret = 0;
 
@@ -9921,6 +9938,7 @@ qemuDomainSnapshotCreateDiskActive(virConnectPtr conn,
     int i;
     bool persist = false;
     int thaw = 0; /* 1 if freeze succeeded, -1 if freeze failed */
+    bool atomic = (flags & VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC) != 0;
 
     if (qemuDomainObjBeginJobWithDriver(driver, vm, QEMU_JOB_MODIFY) < 0)
         return -1;
@@ -9945,14 +9963,14 @@ qemuDomainSnapshotCreateDiskActive(virConnectPtr conn,
         }
     }
 
-    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
-        /* In qemu, snapshot_blkdev on a single disk will pause cpus,
-         * but this confuses libvirt since notifications are not given
-         * when qemu resumes. And for multiple disks, libvirt must
-         * pause externally to get all snapshots to be at the same
-         * point in time. For simplicitly, we always pause ourselves
-         * rather than relying on qemu doing pause.
-         */
+    /* For multiple disks, libvirt must pause externally to get all
+     * snapshots to be at the same point in time, unless qemu supports
+     * transactions. For a single disk, snapshot is atomic without
+     * requiring a pause. Thanks to qemuDomainSnapshotDiskPrepare, if
+     * we got to this point, the atomic flag now says whether we need
+     * to pause, and a capability bit says whether to use transaction.
+     */
+    if (!atomic && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
         if (qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_SAVE,
                                 QEMU_ASYNC_JOB_NONE) < 0)
             goto cleanup;
@@ -10040,10 +10058,10 @@ endjob:
         ret = -1;
     }
     if (vm && (qemuDomainObjEndJob(driver, vm) == 0)) {
         /* Only possible if a transient vm quit while our locks were down,
          * in which case we don't want to save snapshot metadata. */
         *vmptr = NULL;
         ret = -1;
     }
 
     return ret;
@@ -10071,7 +10089,8 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
                   VIR_DOMAIN_SNAPSHOT_CREATE_HALT |
                   VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                   VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT |
-                  VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE, NULL);
+                  VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE |
+                  VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC, NULL);
 
     if ((flags & VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) &&
         !(flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY)) {
@@ -10213,14 +10232,11 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
             goto cleanup;
 
         if (flags & VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY) {
-            bool allow_reuse;
-
-            allow_reuse = (flags & VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) != 0;
             if (virDomainSnapshotAlignDisks(def,
                                             VIR_DOMAIN_DISK_SNAPSHOT_EXTERNAL,
                                             false) < 0)
                 goto cleanup;
-            if (qemuDomainSnapshotDiskPrepare(vm, def, allow_reuse) < 0)
+            if (qemuDomainSnapshotDiskPrepare(vm, def, &flags) < 0)
                 goto cleanup;
             def->state = VIR_DOMAIN_DISK_SNAPSHOT;
         } else {