qemu: Fix seamless spice migration

Calling qemuDomainMigrateGraphicsRelocate notifies spice clients to
connect to the destination qemu so that they can seamlessly switch
streams once migration is done. Unfortunately, current qemu is not able
to accept any connections while an incoming migration connection is
open. Thus, we need to delay opening the migration connection until the
spice client is already connected to the destination qemu.
commit d9d518b1c8
parent 8f0b03910c
Author: Jiri Denemark
Date:   2012-02-02 16:34:08 +01:00

@@ -1381,6 +1381,7 @@ cleanup:
 enum qemuMigrationDestinationType {
     MIGRATION_DEST_HOST,
+    MIGRATION_DEST_CONNECT_HOST,
     MIGRATION_DEST_UNIX,
     MIGRATION_DEST_FD,
 };
@@ -1518,6 +1519,44 @@ cleanup:
     return rv;
 }
 
+static int
+qemuMigrationConnect(struct qemud_driver *driver,
+                     virDomainObjPtr vm,
+                     qemuMigrationSpecPtr spec)
+{
+    virNetSocketPtr sock;
+    const char *host;
+    char *port = NULL;
+    int ret = -1;
+
+    host = spec->dest.host.name;
+    if (virAsprintf(&port, "%d", spec->dest.host.port) < 0) {
+        virReportOOMError();
+        return -1;
+    }
+
+    spec->destType = MIGRATION_DEST_FD;
+    spec->dest.fd.qemu = -1;
+
+    if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
+        goto cleanup;
+    if (virNetSocketNewConnectTCP(host, port, &sock) == 0) {
+        spec->dest.fd.qemu = virNetSocketDupFD(sock, true);
+        virNetSocketFree(sock);
+    }
+    if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0 ||
+        spec->dest.fd.qemu == -1)
+        goto cleanup;
+
+    ret = 0;
+
+cleanup:
+    VIR_FREE(port);
+    if (ret < 0)
+        VIR_FORCE_CLOSE(spec->dest.fd.qemu);
+    return ret;
+}
+
 static int
 qemuMigrationRun(struct qemud_driver *driver,
                  virDomainObjPtr vm,
@@ -1583,6 +1622,11 @@ qemuMigrationRun(struct qemud_driver *driver,
     if (flags & VIR_MIGRATE_NON_SHARED_INC)
         migrate_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
 
+    /* connect to the destination qemu if needed */
+    if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
+        qemuMigrationConnect(driver, vm, spec) < 0)
+        goto cleanup;
+
     switch (spec->destType) {
     case MIGRATION_DEST_HOST:
         ret = qemuMonitorMigrateToHost(priv->mon, migrate_flags,
@@ -1590,6 +1634,10 @@ qemuMigrationRun(struct qemud_driver *driver,
                                        spec->dest.host.port);
         break;
 
+    case MIGRATION_DEST_CONNECT_HOST:
+        /* handled above and transformed into MIGRATION_DEST_FD */
+        break;
+
     case MIGRATION_DEST_UNIX:
         if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
             ret = qemuMonitorMigrateToUnix(priv->mon, migrate_flags,
@@ -1712,7 +1760,6 @@ static int doNativeMigrate(struct qemud_driver *driver,
     xmlURIPtr uribits = NULL;
     int ret = -1;
     qemuMigrationSpec spec;
-    char *tmp = NULL;
 
     VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
               "cookieout=%p, cookieoutlen=%p, flags=%lx, resource=%lu",
@@ -1720,6 +1767,7 @@ static int doNativeMigrate(struct qemud_driver *driver,
               cookieout, cookieoutlen, flags, resource);
 
     if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
+        char *tmp;
         /* HACK: source host generates bogus URIs, so fix them up */
         if (virAsprintf(&tmp, "tcp://%s", uri + strlen("tcp:")) < 0) {
             virReportOOMError();
@@ -1736,41 +1784,20 @@ static int doNativeMigrate(struct qemud_driver *driver,
         return -1;
     }
 
-    spec.fwdType = MIGRATION_FWD_DIRECT;
-
-    if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD)) {
-        virNetSocketPtr sock;
-
-        spec.destType = MIGRATION_DEST_FD;
-        spec.dest.fd.qemu = -1;
-
-        if (virAsprintf(&tmp, "%d", uribits->port) < 0) {
-            virReportOOMError();
-            goto cleanup;
-        }
-        if (virSecurityManagerSetSocketLabel(driver->securityManager, vm->def) < 0)
-            goto cleanup;
-        if (virNetSocketNewConnectTCP(uribits->server, tmp, &sock) == 0) {
-            spec.dest.fd.qemu = virNetSocketDupFD(sock, true);
-            virNetSocketFree(sock);
-        }
-        if (virSecurityManagerClearSocketLabel(driver->securityManager, vm->def) < 0 ||
-            spec.dest.fd.qemu == -1)
-            goto cleanup;
-    } else {
-        spec.destType = MIGRATION_DEST_HOST;
-        spec.dest.host.name = uribits->server;
-        spec.dest.host.port = uribits->port;
-    }
+    if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD))
+        spec.destType = MIGRATION_DEST_CONNECT_HOST;
+    else
+        spec.destType = MIGRATION_DEST_HOST;
+    spec.dest.host.name = uribits->server;
+    spec.dest.host.port = uribits->port;
+    spec.fwdType = MIGRATION_FWD_DIRECT;
 
     ret = qemuMigrationRun(driver, vm, cookiein, cookieinlen, cookieout,
                            cookieoutlen, flags, resource, &spec, dconn);
 
-cleanup:
     if (spec.destType == MIGRATION_DEST_FD)
         VIR_FORCE_CLOSE(spec.dest.fd.qemu);
 
-    VIR_FREE(tmp);
     xmlFreeURI(uribits);
     return ret;
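
To make the ordering described in the commit message concrete, here is a
standalone sketch of the resulting flow: spice relocation is requested
first, and only then is the migration connection opened and the migrate
command issued. The names below (graphics_relocate, late_connect, run)
are illustrative stubs standing in for qemuDomainMigrateGraphicsRelocate,
qemuMigrationConnect, and qemuMigrationRun; this is a schematic model of
the control flow, not libvirt code.

#include <stdio.h>

enum dest_type { DEST_HOST, DEST_CONNECT_HOST, DEST_FD };

struct migration_spec {
    enum dest_type dest_type;
    int fd;                     /* fd handed to qemu once connected */
};

/* stands in for qemuDomainMigrateGraphicsRelocate(): spice clients are
 * told where the destination qemu listens so they can connect first */
static void graphics_relocate(void)
{
    printf("1. spice clients notified of destination graphics address\n");
}

/* stands in for qemuMigrationConnect(): open the connection carrying
 * the migration stream only at this late point and turn the
 * CONNECT_HOST spec into an FD spec */
static int late_connect(struct migration_spec *spec)
{
    printf("2. migration connection opened\n");
    spec->dest_type = DEST_FD;
    spec->fd = 42;              /* placeholder for the real socket fd */
    return 0;
}

/* stands in for the relevant part of qemuMigrationRun(): relocation is
 * requested before the migration connection exists */
static int run(struct migration_spec *spec)
{
    graphics_relocate();

    if (spec->dest_type == DEST_CONNECT_HOST && late_connect(spec) < 0)
        return -1;

    printf("3. migrate command issued (fd=%d)\n", spec->fd);
    return 0;
}

int main(void)
{
    struct migration_spec spec = { DEST_CONNECT_HOST, -1 };
    return run(&spec) < 0 ? 1 : 0;
}

Compiling and running the sketch prints the three steps in the order the
patch enforces: notify spice clients, open the migration connection,
issue the migrate command.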