/*
 * qemu_migration.c: QEMU migration handling
 *
 * Copyright (C) 2006-2011 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <config.h>

#include <sys/time.h>
#include <gnutls/gnutls.h>
#include <gnutls/x509.h>

#include "qemu_migration.h"
#include "qemu_monitor.h"
#include "qemu_domain.h"
#include "qemu_process.h"
#include "qemu_capabilities.h"
#include "qemu_audit.h"
#include "qemu_cgroup.h"

#include "logging.h"
#include "virterror_internal.h"
#include "memory.h"
#include "util.h"
#include "files.h"
#include "datatypes.h"
#include "fdstream.h"
#include "uuid.h"
#include "locking/domain_lock.h"


#define VIR_FROM_THIS VIR_FROM_QEMU

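/* Migration cookie handling.
 *
 * The source and destination hosts exchange a small XML document (the
 * "cookie") alongside each migration phase. Each optional payload
 * (graphics relocation data, lock manager state) is tracked by one of
 * the flags below; a flag recorded in flagsMandatory must be understood
 * by the peer, otherwise the incoming cookie is reported as unsupported.
 */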
enum qemuMigrationCookieFlags {
    QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
    QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,

    QEMU_MIGRATION_COOKIE_FLAG_LAST
};

VIR_ENUM_DECL(qemuMigrationCookieFlag);
VIR_ENUM_IMPL(qemuMigrationCookieFlag,
              QEMU_MIGRATION_COOKIE_FLAG_LAST,
              "graphics", "lockstate");

enum qemuMigrationCookieFeatures {
    QEMU_MIGRATION_COOKIE_GRAPHICS  = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
    QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
};

typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
typedef qemuMigrationCookieGraphics *qemuMigrationCookieGraphicsPtr;
struct _qemuMigrationCookieGraphics {
    int type;
    int port;
    int tlsPort;
    char *listen;
    char *tlsSubject;
};

typedef struct _qemuMigrationCookie qemuMigrationCookie;
typedef qemuMigrationCookie *qemuMigrationCookiePtr;
struct _qemuMigrationCookie {
    int flags;
    int flagsMandatory;

    /* Host properties */
    unsigned char localHostuuid[VIR_UUID_BUFLEN];
    unsigned char remoteHostuuid[VIR_UUID_BUFLEN];
    char *localHostname;
    char *remoteHostname;

    /* Guest properties */
    unsigned char uuid[VIR_UUID_BUFLEN];
    char *name;

    /* If (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) */
    char *lockState;
    char *lockDriver;

    /* If (flags & QEMU_MIGRATION_COOKIE_GRAPHICS) */
    qemuMigrationCookieGraphicsPtr graphics;
};


static void qemuMigrationCookieGraphicsFree(qemuMigrationCookieGraphicsPtr grap)
{
    if (!grap)
        return;
    VIR_FREE(grap->listen);
    VIR_FREE(grap->tlsSubject);
    VIR_FREE(grap);
}


static void qemuMigrationCookieFree(qemuMigrationCookiePtr mig)
{
    if (!mig)
        return;

    if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS)
        qemuMigrationCookieGraphicsFree(mig->graphics);

    VIR_FREE(mig->localHostname);
    VIR_FREE(mig->remoteHostname);
    VIR_FREE(mig->name);
    VIR_FREE(mig->lockState);
    VIR_FREE(mig->lockDriver);
    VIR_FREE(mig);
}

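/* Read the x509 server certificate from @certdir and return its
 * distinguished name (subject) as a newly allocated string, or NULL
 * on error. The caller is responsible for freeing the result. */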
static char *
qemuDomainExtractTLSSubject(const char *certdir)
{
    char *certfile = NULL;
    char *subject = NULL;
    char *pemdata = NULL;
    gnutls_datum_t pemdatum;
    gnutls_x509_crt_t cert;
    int ret;
    size_t subjectlen;

    if (virAsprintf(&certfile, "%s/server-cert.pem", certdir) < 0)
        goto no_memory;

    if (virFileReadAll(certfile, 8192, &pemdata) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("unable to read server cert %s"), certfile);
        goto error;
    }

    ret = gnutls_x509_crt_init(&cert);
    if (ret < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("cannot initialize cert object: %s"),
                        gnutls_strerror(ret));
        goto error;
    }

    pemdatum.data = (unsigned char *)pemdata;
    pemdatum.size = strlen(pemdata);

    ret = gnutls_x509_crt_import(cert, &pemdatum, GNUTLS_X509_FMT_PEM);
    if (ret < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("cannot load cert data from %s: %s"),
                        certfile, gnutls_strerror(ret));
        goto error;
    }

    subjectlen = 1024;
    if (VIR_ALLOC_N(subject, subjectlen+1) < 0)
        goto no_memory;

    gnutls_x509_crt_get_dn(cert, subject, &subjectlen);
    subject[subjectlen] = '\0';

    VIR_FREE(certfile);
    VIR_FREE(pemdata);

    return subject;

no_memory:
    virReportOOMError();
error:
    VIR_FREE(certfile);
    VIR_FREE(pemdata);
    return NULL;
}


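/* Capture this host's VNC/SPICE port, listen address and (when TLS is
 * enabled) the server certificate subject in a graphics cookie entry;
 * the peer uses this data to tell connected graphics clients where to
 * reconnect. Returns NULL on error. */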
static qemuMigrationCookieGraphicsPtr
qemuMigrationCookieGraphicsAlloc(struct qemud_driver *driver,
                                 virDomainGraphicsDefPtr def)
{
    qemuMigrationCookieGraphicsPtr mig = NULL;
    const char *listenAddr;

    if (VIR_ALLOC(mig) < 0)
        goto no_memory;

    mig->type = def->type;
    if (mig->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC) {
        mig->port = def->data.vnc.port;
        listenAddr = def->data.vnc.listenAddr;
        if (!listenAddr)
            listenAddr = driver->vncListen;

        if (driver->vncTLS &&
            !(mig->tlsSubject = qemuDomainExtractTLSSubject(driver->vncTLSx509certdir)))
            goto error;
    } else {
        mig->port = def->data.spice.port;
        if (driver->spiceTLS)
            mig->tlsPort = def->data.spice.tlsPort;
        else
            mig->tlsPort = -1;
        listenAddr = def->data.spice.listenAddr;
        if (!listenAddr)
            listenAddr = driver->spiceListen;

        if (driver->spiceTLS &&
            !(mig->tlsSubject = qemuDomainExtractTLSSubject(driver->spiceTLSx509certdir)))
            goto error;
    }
    if (!(mig->listen = strdup(listenAddr)))
        goto no_memory;

    return mig;

no_memory:
    virReportOOMError();
error:
    qemuMigrationCookieGraphicsFree(mig);
    return NULL;
}


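/* Allocate a cookie and fill in the properties of the local host
 * (hostname, host UUID) and of the domain (name, UUID). */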
static qemuMigrationCookiePtr
qemuMigrationCookieNew(virDomainObjPtr dom)
{
    qemuMigrationCookiePtr mig = NULL;

    if (VIR_ALLOC(mig) < 0)
        goto no_memory;

    if (!(mig->name = strdup(dom->def->name)))
        goto no_memory;
    memcpy(mig->uuid, dom->def->uuid, VIR_UUID_BUFLEN);

    if (!(mig->localHostname = virGetHostname(NULL)))
        goto no_memory;
    if (virGetHostUUID(mig->localHostuuid) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Unable to obtain host UUID"));
        goto error;
    }

    return mig;

no_memory:
    virReportOOMError();
error:
    qemuMigrationCookieFree(mig);
    return NULL;
}


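/* Add graphics relocation data for the domain's primary display to the
 * cookie, provided it uses VNC or SPICE. */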
static int
qemuMigrationCookieAddGraphics(qemuMigrationCookiePtr mig,
                               struct qemud_driver *driver,
                               virDomainObjPtr dom)
{
    if (mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Migration graphics data already present"));
        return -1;
    }

    if (dom->def->ngraphics == 1 &&
        (dom->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_VNC ||
         dom->def->graphics[0]->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE)) {
        if (!(mig->graphics =
              qemuMigrationCookieGraphicsAlloc(driver, dom->def->graphics[0])))
            return -1;
        mig->flags |= QEMU_MIGRATION_COOKIE_GRAPHICS;
    }

    return 0;
}


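/* Add the lock manager state to the cookie. For a paused domain the
 * previously saved state is reused; for a running domain it is obtained
 * from the lock driver. The flag is marked mandatory, so the peer must
 * understand it. */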
static int
qemuMigrationCookieAddLockstate(qemuMigrationCookiePtr mig,
                                struct qemud_driver *driver,
                                virDomainObjPtr dom)
{
    qemuDomainObjPrivatePtr priv = dom->privateData;

    if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Migration lockstate data already present"));
        return -1;
    }

    if (virDomainObjGetState(dom, NULL) == VIR_DOMAIN_PAUSED) {
        if (priv->lockState &&
            !(mig->lockState = strdup(priv->lockState)))
            return -1;
    } else {
        if (virDomainLockProcessInquire(driver->lockManager, dom, &mig->lockState) < 0)
            return -1;
    }

    if (!(mig->lockDriver = strdup(virLockManagerPluginGetName(driver->lockManager)))) {
        VIR_FREE(mig->lockState);
        return -1;
    }

    mig->flags |= QEMU_MIGRATION_COOKIE_LOCKSTATE;
    mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_LOCKSTATE;

    return 0;
}


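/* Serialise the graphics data into the cookie document. The complete
 * cookie emitted by the formatters below looks roughly like this
 * (illustrative values only):
 *
 *   <qemu-migration>
 *     <name>guest</name>
 *     <uuid>...</uuid>
 *     <hostname>source.example.com</hostname>
 *     <hostuuid>...</hostuuid>
 *     <feature name='lockstate'/>
 *     <graphics type='spice' port='5900' listen='0.0.0.0' tlsPort='5901'>
 *       <cert info='subject' value='...'/>
 *     </graphics>
 *     <lockstate driver='sanlock'>
 *       <leases>...</leases>
 *     </lockstate>
 *   </qemu-migration>
 */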
static void qemuMigrationCookieGraphicsXMLFormat(virBufferPtr buf,
                                                 qemuMigrationCookieGraphicsPtr grap)
{
    virBufferAsprintf(buf, " <graphics type='%s' port='%d' listen='%s'",
                      virDomainGraphicsTypeToString(grap->type),
                      grap->port, grap->listen);
    if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
        virBufferAsprintf(buf, " tlsPort='%d'", grap->tlsPort);
    if (grap->tlsSubject) {
        virBufferAddLit(buf, ">\n");
        virBufferEscapeString(buf, " <cert info='subject' value='%s'/>\n", grap->tlsSubject);
        virBufferAddLit(buf, " </graphics>\n");
    } else {
        virBufferAddLit(buf, "/>\n");
    }
}


static void qemuMigrationCookieXMLFormat(virBufferPtr buf,
                                         qemuMigrationCookiePtr mig)
{
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    char hostuuidstr[VIR_UUID_STRING_BUFLEN];
    int i;

    virUUIDFormat(mig->uuid, uuidstr);
    virUUIDFormat(mig->localHostuuid, hostuuidstr);

    virBufferAsprintf(buf, "<qemu-migration>\n");
    virBufferEscapeString(buf, " <name>%s</name>\n", mig->name);
    virBufferAsprintf(buf, " <uuid>%s</uuid>\n", uuidstr);
    virBufferEscapeString(buf, " <hostname>%s</hostname>\n", mig->localHostname);
    virBufferAsprintf(buf, " <hostuuid>%s</hostuuid>\n", hostuuidstr);

    for (i = 0 ; i < QEMU_MIGRATION_COOKIE_FLAG_LAST ; i++) {
        if (mig->flagsMandatory & (1 << i))
            virBufferAsprintf(buf, " <feature name='%s'/>\n",
                              qemuMigrationCookieFlagTypeToString(i));
    }

    if ((mig->flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
        mig->graphics)
        qemuMigrationCookieGraphicsXMLFormat(buf, mig->graphics);

    if ((mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
        mig->lockState) {
        virBufferAsprintf(buf, " <lockstate driver='%s'>\n",
                          mig->lockDriver);
        virBufferAsprintf(buf, " <leases>%s</leases>\n",
                          mig->lockState);
        virBufferAddLit(buf, " </lockstate>\n");
    }

    virBufferAddLit(buf, "</qemu-migration>\n");
}


static char *qemuMigrationCookieXMLFormatStr(qemuMigrationCookiePtr mig)
{
    virBuffer buf = VIR_BUFFER_INITIALIZER;

    qemuMigrationCookieXMLFormat(&buf, mig);

    if (virBufferError(&buf)) {
        virReportOOMError();
        return NULL;
    }

    return virBufferContentAndReset(&buf);
}


static qemuMigrationCookieGraphicsPtr
qemuMigrationCookieGraphicsXMLParse(xmlXPathContextPtr ctxt)
{
    qemuMigrationCookieGraphicsPtr grap;
    char *tmp;

    if (VIR_ALLOC(grap) < 0)
        goto no_memory;

    if (!(tmp = virXPathString("string(./graphics/@type)", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("missing type attribute in migration data"));
        goto error;
    }
    if ((grap->type = virDomainGraphicsTypeFromString(tmp)) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("unknown graphics type %s"), tmp);
        VIR_FREE(tmp);
        goto error;
    }
    if (virXPathInt("string(./graphics/@port)", ctxt, &grap->port) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("missing port attribute in migration data"));
        goto error;
    }
    if (grap->type == VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
        if (virXPathInt("string(./graphics/@tlsPort)", ctxt, &grap->tlsPort) < 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            "%s", _("missing tlsPort attribute in migration data"));
            goto error;
        }
    }
    if (!(grap->listen = virXPathString("string(./graphics/@listen)", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("missing listen attribute in migration data"));
        goto error;
    }
    /* Optional */
    grap->tlsSubject = virXPathString("string(./graphics/cert[@info='subject']/@value)", ctxt);

    return grap;

no_memory:
    virReportOOMError();
error:
    qemuMigrationCookieGraphicsFree(grap);
    return NULL;
}


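/* Parse an incoming cookie against the local data already present in
 * @mig: the domain name and UUID must match, the remote host must differ
 * from the local one, and any feature listed in the document that is not
 * covered by @flags is reported as unsupported. */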
static int
qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
                            xmlXPathContextPtr ctxt,
                            int flags)
{
    char uuidstr[VIR_UUID_STRING_BUFLEN];
    char *tmp;
    xmlNodePtr *nodes = NULL;
    int i, n;

    /* We don't store the uuid, name, hostname, or hostuuid
     * values. We just compare them to local data to do some
     * sanity checking on migration operation
     */

    /* Extract domain name */
    if (!(tmp = virXPathString("string(./name[1])", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("missing name element in migration data"));
        goto error;
    }
    if (STRNEQ(tmp, mig->name)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Incoming cookie data had unexpected name %s vs %s"),
                        tmp, mig->name);
        goto error;
    }
    VIR_FREE(tmp);

    /* Extract domain uuid */
    tmp = virXPathString("string(./uuid[1])", ctxt);
    if (!tmp) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("missing uuid element in migration data"));
        goto error;
    }
    virUUIDFormat(mig->uuid, uuidstr);
    if (STRNEQ(tmp, uuidstr)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Incoming cookie data had unexpected UUID %s vs %s"),
                        tmp, uuidstr);
    }
    VIR_FREE(tmp);

    /* Check & forbid "localhost" migration */
    if (!(mig->remoteHostname = virXPathString("string(./hostname[1])", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("missing hostname element in migration data"));
        goto error;
    }
    if (STREQ(mig->remoteHostname, mig->localHostname)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Attempt to migrate guest to the same host %s"),
                        mig->remoteHostname);
        goto error;
    }

    if (!(tmp = virXPathString("string(./hostuuid[1])", ctxt))) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("missing hostuuid element in migration data"));
        goto error;
    }
    if (virUUIDParse(tmp, mig->remoteHostuuid) < 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        "%s", _("malformed hostuuid element in migration data"));
        goto error;
    }
    if (memcmp(mig->remoteHostuuid, mig->localHostuuid, VIR_UUID_BUFLEN) == 0) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Attempt to migrate guest to the same host %s"),
                        tmp);
        goto error;
    }
    VIR_FREE(tmp);

    /* Check to ensure all mandatory features from XML are also
     * present in 'flags' */
    if ((n = virXPathNodeSet("./features", ctxt, &nodes)) < 0)
        goto error;

    for (i = 0 ; i < n ; i++) {
        int val;
        char *str = virXMLPropString(nodes[i], "name");
        if (!str) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            "%s", _("missing feature name"));
            goto error;
        }

        if ((val = qemuMigrationCookieFlagTypeFromString(str)) < 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unknown migration cookie feature %s"),
                            str);
            VIR_FREE(str);
            goto error;
        }

        if ((flags & (1 << val)) == 0) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Unsupported migration cookie feature %s"),
                            str);
            VIR_FREE(str);
        }
        VIR_FREE(str);
    }
    VIR_FREE(nodes);

    if ((flags & QEMU_MIGRATION_COOKIE_GRAPHICS) &&
        virXPathBoolean("count(./graphics) > 0", ctxt) &&
        (!(mig->graphics = qemuMigrationCookieGraphicsXMLParse(ctxt))))
        goto error;

    if ((flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) &&
        virXPathBoolean("count(./lockstate) > 0", ctxt)) {
        mig->lockDriver = virXPathString("string(./lockstate[1]/@driver)", ctxt);
        if (!mig->lockDriver) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("Missing lock driver name in migration cookie"));
            goto error;
        }
        mig->lockState = virXPathString("string(./lockstate[1]/leases[1])", ctxt);
        if (mig->lockState && STREQ(mig->lockState, ""))
            VIR_FREE(mig->lockState);
    }

    return 0;

error:
    VIR_FREE(tmp);
    VIR_FREE(nodes);
    return -1;
}


static int
qemuMigrationCookieXMLParseStr(qemuMigrationCookiePtr mig,
                               const char *xml,
                               int flags)
{
    xmlDocPtr doc = NULL;
    xmlXPathContextPtr ctxt = NULL;
    int ret = -1;

    VIR_DEBUG("xml=%s", NULLSTR(xml));

    if (!(doc = virXMLParseString(xml, "qemumigration.xml")))
        goto cleanup;

    if ((ctxt = xmlXPathNewContext(doc)) == NULL) {
        virReportOOMError();
        goto cleanup;
    }

    ctxt->node = xmlDocGetRootElement(doc);

    ret = qemuMigrationCookieXMLParse(mig, ctxt, flags);

cleanup:
    xmlXPathFreeContext(ctxt);
    xmlFreeDoc(doc);

    return ret;
}


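/* Populate the requested cookie payloads (graphics and/or lockstate,
 * per @flags) and serialise the cookie into @cookieout / @cookieoutlen.
 * A NULL @cookieout is silently ignored. */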
static int
qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
                        struct qemud_driver *driver,
                        virDomainObjPtr dom,
                        char **cookieout,
                        int *cookieoutlen,
                        int flags)
{
    if (!cookieout || !cookieoutlen)
        return 0;

    *cookieoutlen = 0;

    if (flags & QEMU_MIGRATION_COOKIE_GRAPHICS &&
        qemuMigrationCookieAddGraphics(mig, driver, dom) < 0)
        return -1;

    if (flags & QEMU_MIGRATION_COOKIE_LOCKSTATE &&
        qemuMigrationCookieAddLockstate(mig, driver, dom) < 0)
        return -1;

    if (!(*cookieout = qemuMigrationCookieXMLFormatStr(mig)))
        return -1;

    *cookieoutlen = strlen(*cookieout) + 1;

    VIR_DEBUG("cookielen=%d cookie=%s", *cookieoutlen, *cookieout);

    return 0;
}


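/* Allocate a new cookie for @dom and, if an incoming cookie was
 * supplied, parse and validate it. When lock state is expected, the
 * cookie's lock driver must match the one configured locally. */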
static qemuMigrationCookiePtr
qemuMigrationEatCookie(struct qemud_driver *driver,
                       virDomainObjPtr dom,
                       const char *cookiein,
                       int cookieinlen,
                       int flags)
{
    qemuMigrationCookiePtr mig = NULL;

    /* Parse & validate incoming cookie (if any) */
    if (cookiein && cookieinlen &&
        cookiein[cookieinlen-1] != '\0') {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("Migration cookie was not NULL terminated"));
        goto error;
    }

    VIR_DEBUG("cookielen=%d cookie='%s'", cookieinlen, NULLSTR(cookiein));

    if (!(mig = qemuMigrationCookieNew(dom)))
        return NULL;

    if (cookiein && cookieinlen &&
        qemuMigrationCookieXMLParseStr(mig,
                                       cookiein,
                                       flags) < 0)
        goto error;

    if (mig->flags & QEMU_MIGRATION_COOKIE_LOCKSTATE) {
        if (!mig->lockDriver) {
            if (virLockManagerPluginUsesState(driver->lockManager)) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                _("Missing %s lock state for migration cookie"),
                                virLockManagerPluginGetName(driver->lockManager));
                goto error;
            }
        } else if (STRNEQ(mig->lockDriver,
                          virLockManagerPluginGetName(driver->lockManager))) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR,
                            _("Source host lock driver %s different from target %s"),
                            mig->lockDriver,
                            virLockManagerPluginGetName(driver->lockManager));
            goto error;
        }
    }

    return mig;

error:
    qemuMigrationCookieFree(mig);
    return NULL;
}


bool
qemuMigrationIsAllowed(virDomainDefPtr def)
{
    if (def->nhostdevs > 0) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("Domain with assigned host devices cannot be migrated"));
        return false;
    }

    return true;
}

/** qemuMigrationSetOffline
 * Pause domain for non-live migration.
 */
int
qemuMigrationSetOffline(struct qemud_driver *driver,
                        virDomainObjPtr vm)
{
    int ret;
    VIR_DEBUG("driver=%p vm=%p", driver, vm);
    ret = qemuProcessStopCPUs(driver, vm, VIR_DOMAIN_PAUSED_MIGRATION);
    if (ret == 0) {
        virDomainEventPtr event;

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_SUSPENDED,
                                         VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED);
        if (event)
            qemuDomainEventQueue(driver, event);
    }

    return ret;
}


static int
qemuMigrationProcessJobSignals(struct qemud_driver *driver,
                               virDomainObjPtr vm,
                               const char *job,
                               bool cleanup)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                        job, _("guest unexpectedly quit"));
        if (cleanup)
            priv->jobSignals = 0;
        return -1;
    }

    if (priv->jobSignals & QEMU_JOB_SIGNAL_CANCEL) {
        priv->jobSignals ^= QEMU_JOB_SIGNAL_CANCEL;
        VIR_DEBUG("Cancelling job at client request");
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        ret = qemuMonitorMigrateCancel(priv->mon);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        if (ret < 0) {
            VIR_WARN("Unable to cancel job");
        }
    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_SUSPEND) {
        priv->jobSignals ^= QEMU_JOB_SIGNAL_SUSPEND;
        VIR_DEBUG("Pausing domain for non-live migration");
        if (qemuMigrationSetOffline(driver, vm) < 0)
            VIR_WARN("Unable to pause domain");
    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME) {
        unsigned long long ms = priv->jobSignalsData.migrateDowntime;

        priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME;
        priv->jobSignalsData.migrateDowntime = 0;
        VIR_DEBUG("Setting migration downtime to %llums", ms);
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        ret = qemuMonitorSetMigrationDowntime(priv->mon, ms);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        if (ret < 0)
            VIR_WARN("Unable to set migration downtime");
    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_MIGRATE_SPEED) {
        unsigned long bandwidth = priv->jobSignalsData.migrateBandwidth;

        priv->jobSignals ^= QEMU_JOB_SIGNAL_MIGRATE_SPEED;
        priv->jobSignalsData.migrateBandwidth = 0;
        VIR_DEBUG("Setting migration bandwidth to %luMbs", bandwidth);
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        ret = qemuMonitorSetMigrationSpeed(priv->mon, bandwidth);
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        if (ret < 0)
            VIR_WARN("Unable to set migration speed");
    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_BLKSTAT) {
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        ret = qemuMonitorGetBlockStatsInfo(priv->mon,
                                           priv->jobSignalsData.statDevName,
                                           &priv->jobSignalsData.blockStat->rd_req,
                                           &priv->jobSignalsData.blockStat->rd_bytes,
                                           &priv->jobSignalsData.blockStat->wr_req,
                                           &priv->jobSignalsData.blockStat->wr_bytes,
                                           &priv->jobSignalsData.blockStat->errs);
        qemuDomainObjExitMonitorWithDriver(driver, vm);

        *priv->jobSignalsData.statRetCode = ret;
        priv->jobSignals ^= QEMU_JOB_SIGNAL_BLKSTAT;

        if (ret < 0)
            VIR_WARN("Unable to get block statistics");
    } else if (priv->jobSignals & QEMU_JOB_SIGNAL_BLKINFO) {
        qemuDomainObjEnterMonitorWithDriver(driver, vm);
        ret = qemuMonitorGetBlockExtent(priv->mon,
                                        priv->jobSignalsData.infoDevName,
                                        &priv->jobSignalsData.blockInfo->allocation);
        qemuDomainObjExitMonitorWithDriver(driver, vm);

        *priv->jobSignalsData.infoRetCode = ret;
        priv->jobSignals ^= QEMU_JOB_SIGNAL_BLKINFO;

        if (ret < 0)
            VIR_WARN("Unable to get block information");
    } else {
        ret = 0;
    }

    return ret;
}


static int
qemuMigrationUpdateJobStatus(struct qemud_driver *driver,
                             virDomainObjPtr vm,
                             const char *job)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret = -1;
    int status;
    unsigned long long memProcessed;
    unsigned long long memRemaining;
    unsigned long long memTotal;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, _("%s: %s"),
                        job, _("guest unexpectedly quit"));
        return -1;
    }

    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    ret = qemuMonitorGetMigrationStatus(priv->mon,
                                        &status,
                                        &memProcessed,
                                        &memRemaining,
                                        &memTotal);
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (ret < 0 || virTimeMs(&priv->jobInfo.timeElapsed) < 0) {
        priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
        return -1;
    }
    priv->jobInfo.timeElapsed -= priv->jobStart;

    switch (status) {
    case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE:
        priv->jobInfo.type = VIR_DOMAIN_JOB_NONE;
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("%s: %s"), job, _("is not active"));
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE:
        priv->jobInfo.dataTotal = memTotal;
        priv->jobInfo.dataRemaining = memRemaining;
        priv->jobInfo.dataProcessed = memProcessed;

        priv->jobInfo.memTotal = memTotal;
        priv->jobInfo.memRemaining = memRemaining;
        priv->jobInfo.memProcessed = memProcessed;

        ret = 0;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED:
        priv->jobInfo.type = VIR_DOMAIN_JOB_COMPLETED;
        ret = 0;
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_ERROR:
        priv->jobInfo.type = VIR_DOMAIN_JOB_FAILED;
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("%s: %s"), job, _("unexpectedly failed"));
        break;

    case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED:
        priv->jobInfo.type = VIR_DOMAIN_JOB_CANCELLED;
        qemuReportError(VIR_ERR_OPERATION_FAILED,
                        _("%s: %s"), job, _("canceled by client"));
        break;
    }

    return ret;
}


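/* Poll the migration job every 50ms, servicing any pending job signals
 * (cancel, pause, downtime/speed changes, block stats queries) and
 * updating priv->jobInfo, until the job completes, fails or is
 * cancelled. */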
int
qemuMigrationWaitForCompletion(struct qemud_driver *driver, virDomainObjPtr vm)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    const char *job;

    switch (priv->jobActive) {
    case QEMU_JOB_MIGRATION_OUT:
        job = _("migration job");
        break;
    case QEMU_JOB_SAVE:
        job = _("domain save job");
        break;
    case QEMU_JOB_DUMP:
        job = _("domain core dump job");
        break;
    default:
        job = _("job");
    }

    priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;

    while (priv->jobInfo.type == VIR_DOMAIN_JOB_UNBOUNDED) {
        /* Poll every 50ms for progress & to allow cancellation */
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 50 * 1000 * 1000ull };
        while (priv->jobSignals) {
            if (qemuMigrationProcessJobSignals(driver, vm, job, false) < 0)
                goto cleanup;
        }

        virCondSignal(&priv->signalCond);

        if (qemuMigrationUpdateJobStatus(driver, vm, job) < 0)
            goto cleanup;


        virDomainObjUnlock(vm);
        qemuDriverUnlock(driver);

        nanosleep(&ts, NULL);

        qemuDriverLock(driver);
        virDomainObjLock(vm);
    }

cleanup:
    while (priv->jobSignals) {
        qemuMigrationProcessJobSignals(driver, vm, job, true);
    }
    virCondBroadcast(&priv->signalCond);

    if (priv->jobInfo.type == VIR_DOMAIN_JOB_COMPLETED)
        return 0;
    else
        return -1;
}


static int
qemuDomainMigrateGraphicsRelocate(struct qemud_driver *driver,
                                  virDomainObjPtr vm,
                                  qemuMigrationCookiePtr cookie)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int ret;

    if (!cookie)
        return 0;

    if (!cookie->graphics)
        return 0;

    /* QEMU doesn't support VNC relocation yet, so
     * skip it to avoid generating an error
     */
    if (cookie->graphics->type != VIR_DOMAIN_GRAPHICS_TYPE_SPICE)
        return 0;

    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    ret = qemuMonitorGraphicsRelocate(priv->mon,
                                      cookie->graphics->type,
                                      cookie->remoteHostname,
                                      cookie->graphics->port,
                                      cookie->graphics->tlsPort,
                                      cookie->graphics->tlsSubject);
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    return ret;
}


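/* Begin phase on the source host: check that the domain can be migrated,
 * emit a cookie carrying the lock state, and return the domain XML to be
 * sent to the destination (optionally derived from @xmlin after an ABI
 * stability check). */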
char *qemuMigrationBegin(struct qemud_driver *driver,
                         virDomainObjPtr vm,
                         const char *xmlin,
                         char **cookieout,
                         int *cookieoutlen)
{
    char *rv = NULL;
    qemuMigrationCookiePtr mig = NULL;
    virDomainDefPtr def = NULL;
    VIR_DEBUG("driver=%p, vm=%p, xmlin=%s, cookieout=%p, cookieoutlen=%p",
              driver, vm, NULLSTR(xmlin), cookieout, cookieoutlen);

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_OPERATION_INVALID,
                        "%s", _("domain is not running"));
        goto cleanup;
    }

    if (!qemuMigrationIsAllowed(vm->def))
        goto cleanup;

    if (!(mig = qemuMigrationEatCookie(driver, vm, NULL, 0, 0)))
        goto cleanup;

    if (qemuMigrationBakeCookie(mig, driver, vm,
                                cookieout, cookieoutlen,
                                QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
        goto cleanup;

    if (xmlin) {
        if (!(def = virDomainDefParseString(driver->caps, xmlin,
                                            VIR_DOMAIN_XML_INACTIVE)))
            goto cleanup;

        if (!virDomainDefCheckABIStability(def, vm->def))
            goto cleanup;

        rv = qemuDomainDefFormatXML(driver, def,
                                    VIR_DOMAIN_XML_SECURE |
                                    VIR_DOMAIN_XML_UPDATE_CPU);
    } else {
        rv = qemuDomainFormatXML(driver, vm,
                                 VIR_DOMAIN_XML_SECURE |
                                 VIR_DOMAIN_XML_UPDATE_CPU);
    }

cleanup:
    virDomainObjUnlock(vm);
    qemuMigrationCookieFree(mig);
    virDomainDefFree(def);
    return rv;
}


/* Prepare is the first step, and it runs on the destination host.
 *
 * This version starts an empty VM reading the incoming data from a pipe
 * (-incoming stdio), and sets up the corresponding virStream to feed that
 * pipe with the tunnelled migration data.
 */
int
qemuMigrationPrepareTunnel(struct qemud_driver *driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           virStreamPtr st,
                           const char *dname,
                           const char *dom_xml)
{
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    int dataFD[2] = { -1, -1 };
    qemuDomainObjPrivatePtr priv = NULL;
    unsigned long long now;
    qemuMigrationCookiePtr mig = NULL;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, st=%p, dname=%s, dom_xml=%s",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, st, NULLSTR(dname), dom_xml);

    if (virTimeMs(&now) < 0)
        return -1;

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;
    priv = vm->privateData;

    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_LOCKSTATE)))
        goto cleanup;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    if (pipe(dataFD) < 0 ||
        virSetCloseExec(dataFD[1]) < 0) {
        virReportSystemError(errno, "%s",
                             _("cannot create pipe for tunnelled migration"));
        goto endjob;
    }

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming stdio (which qemu_command might convert to exec:cat or fd:n)
     */
    internalret = qemuProcessStart(dconn, driver, vm, "stdio", true,
                                   false, dataFD[0], NULL,
                                   VIR_VM_OP_MIGRATE_IN_START);
    if (internalret < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    if (virFDStreamOpen(st, dataFD[1]) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        qemuProcessStop(driver, vm, 0, VIR_DOMAIN_SHUTOFF_FAILED);
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        virReportSystemError(errno, "%s",
                             _("cannot pass pipe for tunnelled migration"));
        goto endjob;
    }
    dataFD[1] = -1; /* 'st' owns the FD now & will close it */

    qemuAuditDomainStart(vm, "migrated", true);

    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
                                QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs being invoked while incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = now;
    }

cleanup:
    virDomainDefFree(def);
    VIR_FORCE_CLOSE(dataFD[0]);
    VIR_FORCE_CLOSE(dataFD[1]);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuMigrationCookieFree(mig);
    return ret;
}


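/* Prepare step for non-tunnelled migration on the destination host: pick
 * or parse a tcp: URI and port, define the incoming domain and start QEMU
 * with "-incoming tcp:0.0.0.0:port", waiting for the source to connect. */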
int
qemuMigrationPrepareDirect(struct qemud_driver *driver,
                           virConnectPtr dconn,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           const char *uri_in,
                           char **uri_out,
                           const char *dname,
                           const char *dom_xml)
{
    static int port = 0;
    virDomainDefPtr def = NULL;
    virDomainObjPtr vm = NULL;
    int this_port;
    char *hostname = NULL;
    char migrateFrom [64];
    const char *p;
    virDomainEventPtr event = NULL;
    int ret = -1;
    int internalret;
    qemuDomainObjPrivatePtr priv = NULL;
    unsigned long long now;
    qemuMigrationCookiePtr mig = NULL;

    VIR_DEBUG("driver=%p, dconn=%p, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, uri_in=%s, uri_out=%p, "
              "dname=%s, dom_xml=%s",
              driver, dconn, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, NULLSTR(uri_in), uri_out,
              NULLSTR(dname), dom_xml);

    if (virTimeMs(&now) < 0)
        return -1;

    /* The URI passed in may be NULL or a string "tcp://somehostname:port".
     *
     * If the URI passed in is NULL then we allocate a port number
     * from our pool of port numbers and return a URI of
     * "tcp://ourhostname:port".
     *
     * If the URI passed in is not NULL then we try to parse out the
     * port number and use that (note that the hostname is assumed
     * to be a correct hostname which refers to the target machine).
     */
    if (uri_in == NULL) {
        this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
        if (port == QEMUD_MIGRATION_NUM_PORTS) port = 0;

        /* Get hostname */
        if ((hostname = virGetHostname(NULL)) == NULL)
            goto cleanup;

        if (STRPREFIX(hostname, "localhost")) {
            qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                            _("hostname on destination resolved to localhost, but migration requires an FQDN"));
            goto cleanup;
        }

        /* XXX this really should have been a properly well-formed
         * URI, but we can't add in tcp:// now without breaking
         * compatibility with old targets. We at least make the
         * new targets accept both syntaxes though.
         */
        /* Caller frees */
        internalret = virAsprintf(uri_out, "tcp:%s:%d", hostname, this_port);
        if (internalret < 0) {
            virReportOOMError();
            goto cleanup;
        }
    } else {
        /* Check the URI starts with "tcp:". We will escape the
         * URI when passing it to the qemu monitor, so bad
         * characters in hostname part don't matter.
         */
        if (!STRPREFIX (uri_in, "tcp:")) {
            qemuReportError (VIR_ERR_INVALID_ARG,
                             "%s", _("only tcp URIs are supported for KVM/QEMU migrations"));
            goto cleanup;
        }

        /* Get the port number. */
        p = strrchr (uri_in, ':');
        if (p == strchr(uri_in, ':')) {
            /* Generate a port */
            this_port = QEMUD_MIGRATION_FIRST_PORT + port++;
            if (port == QEMUD_MIGRATION_NUM_PORTS)
                port = 0;

            /* Caller frees */
            if (virAsprintf(uri_out, "%s:%d", uri_in, this_port) < 0) {
                virReportOOMError();
                goto cleanup;
            }

        } else {
            p++; /* definitely has a ':' in it, see above */
            this_port = virParseNumber (&p);
            if (this_port == -1 || p-uri_in != strlen (uri_in)) {
                qemuReportError(VIR_ERR_INVALID_ARG,
                                "%s", _("URI ended with incorrect ':port'"));
                goto cleanup;
            }
        }
    }

    if (*uri_out)
        VIR_DEBUG("Generated uri_out=%s", *uri_out);

    /* Parse the domain XML. */
    if (!(def = virDomainDefParseString(driver->caps, dom_xml,
                                        VIR_DOMAIN_XML_INACTIVE)))
        goto cleanup;

    if (!qemuMigrationIsAllowed(def))
        goto cleanup;

    /* Target domain name, maybe renamed. */
    if (dname) {
        VIR_FREE(def->name);
        def->name = strdup(dname);
        if (def->name == NULL)
            goto cleanup;
    }

    if (virDomainObjIsDuplicate(&driver->domains, def, 1) < 0)
        goto cleanup;

    if (!(vm = virDomainAssignDef(driver->caps,
                                  &driver->domains,
                                  def, true))) {
        /* virDomainAssignDef already set the error */
        goto cleanup;
    }
    def = NULL;
    priv = vm->privateData;

    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_LOCKSTATE)))
        goto cleanup;

    if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
        goto cleanup;
    priv->jobActive = QEMU_JOB_MIGRATION_OUT;

    /* Domain starts inactive, even if the domain XML had an id field. */
    vm->def->id = -1;

    /* Start the QEMU daemon, with the same command-line arguments plus
     * -incoming tcp:0.0.0.0:port
     */
    snprintf (migrateFrom, sizeof (migrateFrom), "tcp:0.0.0.0:%d", this_port);
    if (qemuProcessStart(dconn, driver, vm, migrateFrom, true, false,
                         -1, NULL, VIR_VM_OP_MIGRATE_IN_START) < 0) {
        qemuAuditDomainStart(vm, "migrated", false);
        /* Note that we don't set an error here because qemuProcessStart
         * should have already done that.
         */
        if (!vm->persistent) {
            if (qemuDomainObjEndJob(vm) > 0)
                virDomainRemoveInactive(&driver->domains, vm);
            vm = NULL;
        }
        goto endjob;
    }

    if (mig->lockState) {
        VIR_DEBUG("Received lockstate %s", mig->lockState);
        VIR_FREE(priv->lockState);
        priv->lockState = mig->lockState;
        mig->lockState = NULL;
    } else {
        VIR_DEBUG("Received no lockstate");
    }

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen,
                                QEMU_MIGRATION_COOKIE_GRAPHICS) < 0) {
        /* We could tear down the whole guest here, but
         * cookie data is (so far) non-critical, so that
         * seems a little harsh. We'll just warn for now.
         */
        VIR_WARN("Unable to encode migration cookie");
    }

    qemuAuditDomainStart(vm, "migrated", true);
    event = virDomainEventNewFromObj(vm,
                                     VIR_DOMAIN_EVENT_STARTED,
                                     VIR_DOMAIN_EVENT_STARTED_MIGRATED);
    ret = 0;

endjob:
    if (vm &&
        qemuDomainObjEndJob(vm) == 0)
        vm = NULL;

    /* We set a fake job active which is held across
     * API calls until the finish() call. This prevents
     * any other APIs being invoked while incoming
     * migration is taking place
     */
    if (vm &&
        virDomainObjIsActive(vm)) {
        priv->jobActive = QEMU_JOB_MIGRATION_IN;
        priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
        priv->jobStart = now;
    }

cleanup:
    VIR_FREE(hostname);
    virDomainDefFree(def);
    if (ret != 0)
        VIR_FREE(*uri_out);
    if (vm)
        virDomainObjUnlock(vm);
    if (event)
        qemuDomainEventQueue(driver, event);
    qemuMigrationCookieFree(mig);
    return ret;
}


/* Perform migration using QEMU's native TCP migrate support,
 * not encrypted obviously
 */
static int doNativeMigrate(struct qemud_driver *driver,
                           virDomainObjPtr vm,
                           const char *uri,
                           const char *cookiein,
                           int cookieinlen,
                           char **cookieout,
                           int *cookieoutlen,
                           unsigned int flags,
                           const char *dname ATTRIBUTE_UNUSED,
                           unsigned long resource)
{
    int ret = -1;
    xmlURIPtr uribits = NULL;
    qemuDomainObjPrivatePtr priv = vm->privateData;
    unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
    qemuMigrationCookiePtr mig = NULL;
    VIR_DEBUG("driver=%p, vm=%p, uri=%s, cookiein=%s, cookieinlen=%d, "
              "cookieout=%p, cookieoutlen=%p, flags=%u, dname=%s, resource=%lu",
              driver, vm, uri, NULLSTR(cookiein), cookieinlen,
              cookieout, cookieoutlen, flags, NULLSTR(dname), resource);

    if (virLockManagerPluginUsesState(driver->lockManager) &&
        !cookieout) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("Migration with lock driver %s requires cookie support"),
                        virLockManagerPluginGetName(driver->lockManager));
        return -1;
    }

    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
                                       QEMU_MIGRATION_COOKIE_GRAPHICS)))
        goto cleanup;

    if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0)
        VIR_WARN("unable to provide data for graphics client relocation");

    /* Issue the migrate command. */
    if (STRPREFIX(uri, "tcp:") && !STRPREFIX(uri, "tcp://")) {
        /* HACK: source host generates bogus URIs, so fix them up */
        char *tmpuri;
        if (virAsprintf(&tmpuri, "tcp://%s", uri + strlen("tcp:")) < 0) {
            virReportOOMError();
            goto cleanup;
        }
        uribits = xmlParseURI(tmpuri);
        VIR_FREE(tmpuri);
    } else {
        uribits = xmlParseURI(uri);
    }
    if (!uribits) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR,
                        _("cannot parse URI %s"), uri);
        goto cleanup;
    }

    /* Before EnterMonitor, since qemuProcessStopCPUs already does that */
    if (!(flags & VIR_MIGRATE_LIVE) &&
        virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuMigrationSetOffline(driver, vm) < 0)
            goto cleanup;
    }

    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    if (resource > 0 &&
        qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cleanup;
    }

    if (flags & VIR_MIGRATE_NON_SHARED_DISK)
        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;

    if (flags & VIR_MIGRATE_NON_SHARED_INC)
        background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;

    if (qemuMonitorMigrateToHost(priv->mon, background_flags, uribits->server,
                                 uribits->port) < 0) {
        qemuDomainObjExitMonitorWithDriver(driver, vm);
        goto cleanup;
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (qemuMigrationWaitForCompletion(driver, vm) < 0)
        goto cleanup;

    /* When migration completed, QEMU will have paused the
     * CPUs for us, but unless we're using the JSON monitor
     * we won't have been notified of this, so might still
     * think we're running. For v2 protocol this doesn't
     * matter because we'll kill the VM soon, but for v3
     * this is important because we stay paused until the
     * confirm3 step, but need to release the lock state
     */
    if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
        if (qemuMigrationSetOffline(driver, vm) < 0)
            goto cleanup;
    }

    if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
        VIR_WARN("Unable to encode migration cookie");

    ret = 0;

cleanup:
    qemuMigrationCookieFree(mig);
    xmlFreeURI(uribits);
    return ret;
}


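/* Tunnelled migration pumps the QEMU migration data through a libvirt
 * virStream. A dedicated IO thread reads from the local socket/pipe in
 * 64KiB chunks and writes each chunk to the stream until EOF. */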
#define TUNNEL_SEND_BUF_SIZE 65536
|
|
|
|
|
2011-05-09 10:52:42 -05:00
|
|
|
typedef struct _qemuMigrationIOThread qemuMigrationIOThread;
|
|
|
|
typedef qemuMigrationIOThread *qemuMigrationIOThreadPtr;
|
|
|
|
struct _qemuMigrationIOThread {
|
|
|
|
virThread thread;
|
|
|
|
virStreamPtr st;
|
|
|
|
int sock;
|
|
|
|
virError err;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void qemuMigrationIOFunc(void *arg)
|
2011-01-31 04:47:03 -06:00
|
|
|
{
|
2011-05-09 10:52:42 -05:00
|
|
|
qemuMigrationIOThreadPtr data = arg;
|
2011-01-31 04:47:03 -06:00
|
|
|
char *buffer;
|
|
|
|
int nbytes = TUNNEL_SEND_BUF_SIZE;
|
|
|
|
|
|
|
|
if (VIR_ALLOC_N(buffer, TUNNEL_SEND_BUF_SIZE) < 0) {
|
|
|
|
virReportOOMError();
|
2011-05-09 10:52:42 -05:00
|
|
|
virStreamAbort(data->st);
|
|
|
|
goto error;
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
for (;;) {
|
2011-05-09 10:52:42 -05:00
|
|
|
nbytes = saferead(data->sock, buffer, TUNNEL_SEND_BUF_SIZE);
|
2011-01-31 04:47:03 -06:00
|
|
|
if (nbytes < 0) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("tunnelled migration failed to read from qemu"));
|
2011-05-09 10:52:42 -05:00
|
|
|
virStreamAbort(data->st);
|
2011-01-31 04:47:03 -06:00
|
|
|
VIR_FREE(buffer);
|
2011-05-09 10:52:42 -05:00
|
|
|
goto error;
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
|
|
|
else if (nbytes == 0)
|
|
|
|
/* EOF; get out of here */
|
|
|
|
break;
|
|
|
|
|
2011-05-09 10:52:42 -05:00
|
|
|
if (virStreamSend(data->st, buffer, nbytes) < 0) {
|
2011-01-31 04:47:03 -06:00
|
|
|
VIR_FREE(buffer);
|
2011-05-09 10:52:42 -05:00
|
|
|
goto error;
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
VIR_FREE(buffer);
|
|
|
|
|
2011-05-09 10:52:42 -05:00
|
|
|
if (virStreamFinish(data->st) < 0)
|
|
|
|
goto error;
|
2011-01-31 04:47:03 -06:00
|
|
|
|
2011-05-09 10:52:42 -05:00
|
|
|
return;
|
|
|
|
|
|
|
|
error:
|
|
|
|
virCopyLastError(&data->err);
|
|
|
|
virResetLastError();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
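/* Spawn the joinable IO thread that pumps migration data from the qemu
 * socket into the destination stream */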
static qemuMigrationIOThreadPtr
|
|
|
|
qemuMigrationStartTunnel(virStreamPtr st,
|
|
|
|
int sock)
|
|
|
|
{
|
|
|
|
qemuMigrationIOThreadPtr io;
|
|
|
|
|
|
|
|
if (VIR_ALLOC(io) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
io->st = st;
|
|
|
|
io->sock = sock;
|
|
|
|
|
|
|
|
if (virThreadCreate(&io->thread, true,
|
|
|
|
qemuMigrationIOFunc,
|
|
|
|
io) < 0) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("Unable to create migration thread"));
|
|
|
|
VIR_FREE(io);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return io;
|
|
|
|
}
|
|
|
|
|
|
|
|
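/* Wait for the IO thread to finish and report any error it recorded */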
static int
|
|
|
|
qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io)
|
|
|
|
{
|
|
|
|
int rv = -1;
|
|
|
|
virThreadJoin(&io->thread);
|
|
|
|
|
|
|
|
/* Forward error from the IO thread, to this thread */
|
|
|
|
if (io->err.code != VIR_ERR_OK) {
|
|
|
|
virSetError(&io->err);
|
|
|
|
virResetError(&io->err);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
rv = 0;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
VIR_FREE(io);
|
|
|
|
return rv;
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
|
|
|
|
2011-04-20 07:12:43 -05:00
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
static int doTunnelMigrate(struct qemud_driver *driver,
|
|
|
|
virDomainObjPtr vm,
|
2011-04-20 07:12:43 -05:00
|
|
|
virStreamPtr st,
|
2011-02-03 05:09:28 -06:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
2011-01-31 04:47:03 -06:00
|
|
|
unsigned long flags,
|
2011-04-20 11:56:35 -05:00
|
|
|
unsigned long resource)
|
2011-01-31 04:47:03 -06:00
|
|
|
{
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
|
|
|
int client_sock = -1;
|
|
|
|
int qemu_sock = -1;
|
|
|
|
struct sockaddr_un sa_qemu, sa_client;
|
|
|
|
socklen_t addrlen;
|
|
|
|
int status;
|
|
|
|
unsigned long long transferred, remaining, total;
|
2011-04-20 07:12:43 -05:00
|
|
|
char *unixfile = NULL;
|
2011-01-31 04:47:03 -06:00
|
|
|
unsigned int background_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
|
2011-04-20 07:12:43 -05:00
|
|
|
int ret = -1;
|
2011-02-03 05:09:28 -06:00
|
|
|
qemuMigrationCookiePtr mig = NULL;
|
2011-05-09 10:52:42 -05:00
|
|
|
qemuMigrationIOThreadPtr iothread = NULL;
|
2011-05-20 05:03:04 -05:00
|
|
|
VIR_DEBUG("driver=%p, vm=%p, st=%p, cookiein=%s, cookieinlen=%d, "
|
|
|
|
"cookieout=%p, cookieoutlen=%p, flags=%lu, resource=%lu",
|
|
|
|
driver, vm, st, NULLSTR(cookiein), cookieinlen,
|
|
|
|
cookieout, cookieoutlen, flags, resource);
|
2011-01-31 04:47:03 -06:00
|
|
|
|
2011-05-18 11:34:21 -05:00
|
|
|
if (virLockManagerPluginUsesState(driver->lockManager) &&
|
|
|
|
!cookieout) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Migration with lock driver %s requires cookie support"),
|
|
|
|
virLockManagerPluginGetName(driver->lockManager));
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-04-20 07:12:43 -05:00
|
|
|
if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX) &&
|
|
|
|
!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
"%s", _("Source qemu is too old to support tunnelled migration"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
|
|
|
|
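    /* Build a per-domain unix socket path under the driver's lib directory;
     * qemu will write the migration stream to this socket */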
if (virAsprintf(&unixfile, "%s/qemu.tunnelmigrate.src.%s",
|
|
|
|
driver->libDir, vm->def->name) < 0) {
|
|
|
|
virReportOOMError();
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemu_sock = socket(AF_UNIX, SOCK_STREAM, 0);
|
|
|
|
if (qemu_sock < 0) {
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("cannot open tunnelled migration socket"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
memset(&sa_qemu, 0, sizeof(sa_qemu));
|
|
|
|
sa_qemu.sun_family = AF_UNIX;
|
|
|
|
if (virStrcpy(sa_qemu.sun_path, unixfile,
|
|
|
|
sizeof(sa_qemu.sun_path)) == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("Unix socket '%s' too big for destination"),
|
|
|
|
unixfile);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
unlink(unixfile);
|
|
|
|
if (bind(qemu_sock, (struct sockaddr *)&sa_qemu, sizeof(sa_qemu)) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Cannot bind to unix socket '%s' for tunnelled migration"),
|
|
|
|
unixfile);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
if (listen(qemu_sock, 1) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Cannot listen on unix socket '%s' for tunnelled migration"),
|
|
|
|
unixfile);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
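    /* The socket must be connectable by the qemu process, which may run
     * as a different user/group than libvirtd */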
if (chown(unixfile, driver->user, driver->group) < 0) {
|
|
|
|
virReportSystemError(errno,
|
|
|
|
_("Cannot change unix socket '%s' owner"),
|
|
|
|
unixfile);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* the domain may have shutdown or crashed while we had the locks dropped
|
|
|
|
* in qemuDomainObjEnterRemoteWithDriver, so check again
|
|
|
|
*/
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-18 11:34:21 -05:00
|
|
|
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
|
2011-02-03 05:09:28 -06:00
|
|
|
QEMU_MIGRATION_COOKIE_GRAPHICS)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig) < 0)
|
|
|
|
VIR_WARN("unable to provide data for graphics client relocation");
|
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
/* 3. start migration on source */
|
2011-05-20 07:29:42 -05:00
|
|
|
/* Before EnterMonitor, since qemuProcessStopCPUs already does that */
|
|
|
|
if (!(flags & VIR_MIGRATE_LIVE) &&
|
|
|
|
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
|
|
|
if (qemuMigrationSetOffline(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
2011-04-20 11:56:35 -05:00
|
|
|
if (resource > 0 &&
|
|
|
|
qemuMonitorSetMigrationSpeed(priv->mon, resource) < 0) {
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
2011-04-20 07:12:43 -05:00
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
if (flags & VIR_MIGRATE_NON_SHARED_DISK)
|
|
|
|
background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_DISK;
|
|
|
|
if (flags & VIR_MIGRATE_NON_SHARED_INC)
|
|
|
|
background_flags |= QEMU_MONITOR_MIGRATE_NON_SHARED_INC;
|
2011-04-20 07:12:43 -05:00
|
|
|
|
2011-05-04 07:12:57 -05:00
|
|
|
if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_UNIX)) {
|
2011-04-20 07:12:43 -05:00
|
|
|
ret = qemuMonitorMigrateToUnix(priv->mon, background_flags,
|
2011-01-31 04:47:03 -06:00
|
|
|
unixfile);
|
2011-04-20 07:12:43 -05:00
|
|
|
} else if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_EXEC)) {
|
2011-01-31 04:47:03 -06:00
|
|
|
const char *args[] = { "nc", "-U", unixfile, NULL };
|
2011-04-20 07:12:43 -05:00
|
|
|
ret = qemuMonitorMigrateToCommand(priv->mon, QEMU_MONITOR_MIGRATE_BACKGROUND, args);
|
2011-01-31 04:47:03 -06:00
|
|
|
} else {
|
2011-04-20 07:12:43 -05:00
|
|
|
ret = -1;
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
2011-04-20 07:12:43 -05:00
|
|
|
if (ret < 0) {
|
2011-01-31 04:47:03 -06:00
|
|
|
qemuReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
"%s", _("tunnelled migration monitor command failed"));
|
2011-04-20 07:12:43 -05:00
|
|
|
goto cleanup;
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
2011-04-20 07:12:43 -05:00
|
|
|
ret = -1;
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* From this point onwards we *must* call cancel to abort the
|
|
|
|
* migration on source if anything goes wrong */
|
|
|
|
|
|
|
|
/* it is also possible that the migrate didn't fail initially, but
|
|
|
|
* rather failed later on. Check the output of "info migrate"
|
|
|
|
*/
|
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
|
|
|
if (qemuMonitorGetMigrationStatus(priv->mon,
|
|
|
|
&status,
|
|
|
|
&transferred,
|
|
|
|
&remaining,
|
|
|
|
&total) < 0) {
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
goto cancel;
|
|
|
|
}
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
|
|
|
|
if (status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
"%s",_("migrate failed"));
|
|
|
|
goto cancel;
|
|
|
|
}
|
|
|
|
|
|
|
|
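    /* qemu should now connect to the unix socket; accept its connection,
     * retrying on EINTR/EAGAIN */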
addrlen = sizeof(sa_client);
|
|
|
|
while ((client_sock = accept(qemu_sock, (struct sockaddr *)&sa_client, &addrlen)) < 0) {
|
|
|
|
if (errno == EAGAIN || errno == EINTR)
|
|
|
|
continue;
|
|
|
|
virReportSystemError(errno, "%s",
|
|
|
|
_("tunnelled migration failed to accept from qemu"));
|
|
|
|
goto cancel;
|
|
|
|
}
|
|
|
|
|
2011-05-09 10:52:42 -05:00
|
|
|
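    /* Hand the accepted qemu socket to the IO thread, which relays the
     * data into the stream towards the destination */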
if (!(iothread = qemuMigrationStartTunnel(st, client_sock)))
|
|
|
|
goto cancel;
|
|
|
|
|
|
|
|
ret = qemuMigrationWaitForCompletion(driver, vm);
|
|
|
|
|
2011-06-02 10:40:33 -05:00
|
|
|
/* When migration completed, QEMU will have paused the
|
|
|
|
* CPUs for us, but unless we're using the JSON monitor
|
|
|
|
* we won't have been notified of this, so might still
|
|
|
|
* think we're running. For v2 protocol this doesn't
|
|
|
|
* matter because we'll kill the VM soon, but for v3
|
|
|
|
* this is important because we stay paused until the
|
|
|
|
* confirm3 step, but need to release the lock state
|
|
|
|
*/
|
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
|
|
|
|
if (qemuMigrationSetOffline(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-05-09 10:52:42 -05:00
|
|
|
/* Close now to ensure the IO thread quits & is joinable in next method */
|
|
|
|
VIR_FORCE_CLOSE(client_sock);
|
|
|
|
|
|
|
|
if (qemuMigrationStopTunnel(iothread) < 0)
|
|
|
|
ret = -1;
|
2011-01-31 04:47:03 -06:00
|
|
|
|
2011-02-03 05:09:28 -06:00
|
|
|
if (ret == 0 &&
|
|
|
|
qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
|
|
|
|
VIR_WARN("Unable to encode migration cookie");
|
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
cancel:
|
2011-04-20 07:12:43 -05:00
|
|
|
if (ret != 0 && virDomainObjIsActive(vm)) {
|
2011-04-21 11:23:13 -05:00
|
|
|
VIR_FORCE_CLOSE(client_sock);
|
|
|
|
VIR_FORCE_CLOSE(qemu_sock);
|
2011-01-31 04:47:03 -06:00
|
|
|
qemuDomainObjEnterMonitorWithDriver(driver, vm);
|
|
|
|
qemuMonitorMigrateCancel(priv->mon);
|
|
|
|
qemuDomainObjExitMonitorWithDriver(driver, vm);
|
|
|
|
}
|
|
|
|
|
2011-04-20 07:12:43 -05:00
|
|
|
cleanup:
|
2011-02-03 05:09:28 -06:00
|
|
|
qemuMigrationCookieFree(mig);
|
2011-04-20 07:12:43 -05:00
|
|
|
VIR_FORCE_CLOSE(client_sock);
|
|
|
|
VIR_FORCE_CLOSE(qemu_sock);
|
|
|
|
if (unixfile) {
|
|
|
|
unlink(unixfile);
|
|
|
|
VIR_FREE(unixfile);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-04-20 07:48:58 -05:00
|
|
|
/* This is essentially a re-impl of virDomainMigrateVersion2
|
|
|
|
* from libvirt.c, but running in source libvirtd context,
|
|
|
|
* instead of client app context & also adding in tunnel
|
|
|
|
* handling */
|
|
|
|
static int doPeer2PeerMigrate2(struct qemud_driver *driver,
|
|
|
|
virConnectPtr sconn,
|
2011-04-20 07:12:43 -05:00
|
|
|
virConnectPtr dconn,
|
|
|
|
virDomainObjPtr vm,
|
2011-05-18 08:18:53 -05:00
|
|
|
const char *dconnuri,
|
2011-04-20 07:12:43 -05:00
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource)
|
2011-01-31 04:47:03 -06:00
|
|
|
{
|
|
|
|
virDomainPtr ddomain = NULL;
|
|
|
|
char *uri_out = NULL;
|
2011-01-24 12:06:16 -06:00
|
|
|
char *cookie = NULL;
|
2011-04-20 07:48:58 -05:00
|
|
|
char *dom_xml = NULL;
|
|
|
|
int cookielen = 0, ret;
|
|
|
|
virErrorPtr orig_err = NULL;
|
|
|
|
int cancelled;
|
|
|
|
virStreamPtr st = NULL;
|
2011-05-20 05:03:04 -05:00
|
|
|
VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, dconnuri=%s, "
|
|
|
|
"flags=%lu, dname=%s, resource=%lu",
|
|
|
|
driver, sconn, dconn, vm, NULLSTR(dconnuri),
|
|
|
|
flags, NULLSTR(dname), resource);
|
2011-01-31 04:47:03 -06:00
|
|
|
|
2011-04-20 07:48:58 -05:00
|
|
|
/* In version 2 of the protocol, the prepare step is slightly
|
|
|
|
* different. We fetch the domain XML of the source domain
|
|
|
|
* and pass it to Prepare2.
|
|
|
|
*/
|
|
|
|
if (!(dom_xml = qemuDomainFormatXML(driver, vm,
|
|
|
|
VIR_DOMAIN_XML_SECURE |
|
|
|
|
VIR_DOMAIN_XML_UPDATE_CPU)))
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
|
|
|
|
flags |= VIR_MIGRATE_PAUSED;
|
|
|
|
|
|
|
|
VIR_DEBUG("Prepare2 %p", dconn);
|
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED) {
|
|
|
|
/*
|
|
|
|
* Tunnelled Migrate Version 2 does not support cookies
|
|
|
|
* due to missing parameters in the prepareTunnel() API.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (!(st = virStreamNew(dconn, 0)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
|
|
|
ret = dconn->driver->domainMigratePrepareTunnel
|
|
|
|
(dconn, st, flags, dname, resource, dom_xml);
|
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
|
|
|
} else {
|
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
|
|
|
ret = dconn->driver->domainMigratePrepare2
|
|
|
|
(dconn, &cookie, &cookielen, NULL, &uri_out,
|
|
|
|
flags, dname, resource, dom_xml);
|
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
|
|
|
}
|
|
|
|
VIR_FREE(dom_xml);
|
|
|
|
if (ret == -1)
|
2011-01-31 04:47:03 -06:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* the domain may have shutdown or crashed while we had the locks dropped
|
|
|
|
* in qemuDomainObjEnterRemoteWithDriver, so check again
|
|
|
|
*/
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-04-20 07:48:58 -05:00
|
|
|
if (!(flags & VIR_MIGRATE_TUNNELLED) &&
|
|
|
|
(uri_out == NULL)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
2011-01-31 04:47:03 -06:00
|
|
|
_("domainMigratePrepare2 did not set uri"));
|
2011-04-20 07:48:58 -05:00
|
|
|
cancelled = 1;
|
|
|
|
goto finish;
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
|
|
|
|
2011-04-20 07:48:58 -05:00
|
|
|
/* Perform the migration. The driver isn't supposed to return
|
|
|
|
* until the migration is complete.
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("Perform %p", sconn);
|
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED)
|
2011-02-03 05:09:28 -06:00
|
|
|
ret = doTunnelMigrate(driver, vm, st,
|
|
|
|
NULL, 0, NULL, NULL,
|
|
|
|
flags, resource);
|
2011-04-20 07:48:58 -05:00
|
|
|
else
|
|
|
|
ret = doNativeMigrate(driver, vm, uri_out,
|
|
|
|
cookie, cookielen,
|
|
|
|
NULL, NULL, /* No out cookie with v2 migration */
|
|
|
|
flags, dname, resource);
|
|
|
|
|
|
|
|
/* Perform failed. Make sure Finish doesn't overwrite the error */
|
|
|
|
if (ret < 0)
|
|
|
|
orig_err = virSaveLastError();
|
2011-01-31 04:47:03 -06:00
|
|
|
|
2011-04-20 07:48:58 -05:00
|
|
|
/* If Perform returns < 0, then we need to cancel the VM
|
|
|
|
* startup on the destination
|
|
|
|
*/
|
|
|
|
cancelled = ret < 0 ? 1 : 0;
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
finish:
|
2011-04-20 07:48:58 -05:00
|
|
|
/* In version 2 of the migration protocol, we pass the
|
|
|
|
* status code from the sender to the destination host,
|
|
|
|
* so it can do any cleanup if the migration failed.
|
|
|
|
*/
|
2011-01-31 04:47:03 -06:00
|
|
|
dname = dname ? dname : vm->def->name;
|
2011-04-20 07:48:58 -05:00
|
|
|
VIR_DEBUG("Finish2 %p ret=%d", dconn, ret);
|
2011-01-31 04:47:03 -06:00
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
|
|
|
ddomain = dconn->driver->domainMigrateFinish2
|
2011-04-20 07:48:58 -05:00
|
|
|
(dconn, dname, cookie, cookielen,
|
2011-05-18 08:18:53 -05:00
|
|
|
uri_out ? uri_out : dconnuri, flags, cancelled);
|
2011-01-31 04:47:03 -06:00
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
|
|
|
|
2011-04-20 07:48:58 -05:00
|
|
|
cleanup:
|
|
|
|
if (ddomain) {
|
2011-01-31 04:47:03 -06:00
|
|
|
virUnrefDomain(ddomain);
|
2011-04-20 07:48:58 -05:00
|
|
|
ret = 0;
|
|
|
|
} else {
|
|
|
|
ret = -1;
|
|
|
|
}
|
2011-01-31 04:47:03 -06:00
|
|
|
|
2011-04-20 07:48:58 -05:00
|
|
|
if (st)
|
|
|
|
virUnrefStream(st);
|
|
|
|
|
|
|
|
if (orig_err) {
|
|
|
|
virSetError(orig_err);
|
|
|
|
virFreeError(orig_err);
|
|
|
|
}
|
|
|
|
VIR_FREE(uri_out);
|
2011-01-24 12:06:16 -06:00
|
|
|
VIR_FREE(cookie);
|
2011-04-20 07:48:58 -05:00
|
|
|
|
|
|
|
return ret;
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-02-03 05:09:28 -06:00
|
|
|
/* This is essentially a re-impl of virDomainMigrateVersion3
|
|
|
|
* from libvirt.c, but running in source libvirtd context,
|
|
|
|
* instead of client app context & also adding in tunnel
|
|
|
|
* handling */
|
|
|
|
static int doPeer2PeerMigrate3(struct qemud_driver *driver,
|
|
|
|
virConnectPtr sconn,
|
|
|
|
virConnectPtr dconn,
|
|
|
|
virDomainObjPtr vm,
|
2011-05-18 04:26:30 -05:00
|
|
|
const char *xmlin,
|
2011-05-18 08:18:53 -05:00
|
|
|
const char *dconnuri,
|
2011-02-03 05:09:28 -06:00
|
|
|
const char *uri,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
|
|
|
unsigned long resource)
|
|
|
|
{
|
|
|
|
virDomainPtr ddomain = NULL;
|
|
|
|
char *uri_out = NULL;
|
|
|
|
char *cookiein = NULL;
|
|
|
|
char *cookieout = NULL;
|
|
|
|
char *dom_xml = NULL;
|
|
|
|
int cookieinlen = 0;
|
|
|
|
int cookieoutlen = 0;
|
|
|
|
int ret = -1;
|
|
|
|
virErrorPtr orig_err = NULL;
|
|
|
|
int cancelled;
|
|
|
|
virStreamPtr st = NULL;
|
2011-05-20 05:03:04 -05:00
|
|
|
VIR_DEBUG("driver=%p, sconn=%p, dconn=%p, vm=%p, xmlin=%s, "
|
|
|
|
"dconnuri=%s, uri=%s, flags=%lu, dname=%s, resource=%lu",
|
|
|
|
driver, sconn, dconn, vm, NULLSTR(xmlin),
|
|
|
|
NULLSTR(dconnuri), NULLSTR(uri), flags,
|
|
|
|
NULLSTR(dname), resource);
|
2011-02-03 05:09:28 -06:00
|
|
|
|
2011-05-18 04:26:30 -05:00
|
|
|
dom_xml = qemuMigrationBegin(driver, vm, xmlin,
|
2011-02-03 05:09:28 -06:00
|
|
|
&cookieout, &cookieoutlen);
|
|
|
|
if (!dom_xml)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED)
|
|
|
|
flags |= VIR_MIGRATE_PAUSED;
|
|
|
|
|
|
|
|
VIR_DEBUG("Prepare3 %p", dconn);
|
|
|
|
cookiein = cookieout;
|
|
|
|
cookieinlen = cookieoutlen;
|
|
|
|
cookieout = NULL;
|
|
|
|
cookieoutlen = 0;
|
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED) {
|
|
|
|
if (!(st = virStreamNew(dconn, 0)))
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
|
|
|
ret = dconn->driver->domainMigratePrepareTunnel3
|
|
|
|
(dconn, st, cookiein, cookieinlen,
|
|
|
|
&cookieout, &cookieoutlen,
|
|
|
|
flags, dname, resource, dom_xml);
|
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
|
|
|
} else {
|
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
|
|
|
ret = dconn->driver->domainMigratePrepare3
|
|
|
|
(dconn, cookiein, cookieinlen, &cookieout, &cookieoutlen,
|
2011-05-18 08:18:53 -05:00
|
|
|
uri, &uri_out, flags, dname, resource, dom_xml);
|
2011-02-03 05:09:28 -06:00
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
|
|
|
}
|
|
|
|
VIR_FREE(dom_xml);
|
|
|
|
if (ret == -1)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
if (!(flags & VIR_MIGRATE_TUNNELLED) &&
|
|
|
|
(uri_out == NULL)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
_("domainMigratePrepare3 did not set uri"));
|
|
|
|
cancelled = 1;
|
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Perform the migration. The driver isn't supposed to return
|
|
|
|
* until the migration is complete. The src VM should remain
|
|
|
|
* running, but in paused state until the destination can
|
|
|
|
* confirm migration completion.
|
|
|
|
*/
|
2011-05-18 08:18:53 -05:00
|
|
|
VIR_DEBUG("Perform3 %p uri=%s uri_out=%s", sconn, uri, uri_out);
|
2011-02-03 05:09:28 -06:00
|
|
|
VIR_FREE(cookiein);
|
|
|
|
cookiein = cookieout;
|
|
|
|
cookieinlen = cookieoutlen;
|
|
|
|
cookieout = NULL;
|
|
|
|
cookieoutlen = 0;
|
|
|
|
if (flags & VIR_MIGRATE_TUNNELLED)
|
|
|
|
ret = doTunnelMigrate(driver, vm, st,
|
|
|
|
cookiein, cookieinlen,
|
|
|
|
&cookieout, &cookieoutlen,
|
|
|
|
flags, resource);
|
|
|
|
else
|
|
|
|
ret = doNativeMigrate(driver, vm, uri_out,
|
|
|
|
cookiein, cookieinlen,
|
|
|
|
&cookieout, &cookieoutlen,
|
|
|
|
flags, dname, resource);
|
|
|
|
|
|
|
|
/* Perform failed. Make sure Finish doesn't overwrite the error */
|
|
|
|
if (ret < 0)
|
|
|
|
orig_err = virSaveLastError();
|
|
|
|
|
|
|
|
/* If Perform returns < 0, then we need to cancel the VM
|
|
|
|
* startup on the destination
|
|
|
|
*/
|
|
|
|
cancelled = ret < 0 ? 1 : 0;
|
|
|
|
|
|
|
|
finish:
|
|
|
|
/*
|
|
|
|
* The status code from the source is passed to the destination.
|
|
|
|
* The dest can clean up if the source indicated it failed to
|
|
|
|
* send all migration data. Returns NULL for ddomain if
|
|
|
|
* the dest was unable to complete migration.
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("Finish3 %p ret=%d", dconn, ret);
|
|
|
|
VIR_FREE(cookiein);
|
|
|
|
cookiein = cookieout;
|
|
|
|
cookieinlen = cookieoutlen;
|
|
|
|
cookieout = NULL;
|
|
|
|
cookieoutlen = 0;
|
|
|
|
dname = dname ? dname : vm->def->name;
|
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
2011-05-24 07:05:33 -05:00
|
|
|
ddomain = dconn->driver->domainMigrateFinish3
|
2011-02-03 05:09:28 -06:00
|
|
|
(dconn, dname, cookiein, cookieinlen, &cookieout, &cookieoutlen,
|
2011-05-24 07:05:33 -05:00
|
|
|
dconnuri, uri_out ? uri_out : uri, flags, cancelled);
|
2011-02-03 05:09:28 -06:00
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
|
|
|
|
2011-05-24 07:05:33 -05:00
|
|
|
/* If ddomain is NULL, then we were unable to start
|
|
|
|
* the guest on the target, and must restart on the
|
|
|
|
* source. There is a small chance that the ddomain
|
|
|
|
* is NULL due to an RPC failure, in which case
|
|
|
|
* ddomain could in fact be running on the dest.
|
|
|
|
* The lock manager plugins should take care of
|
|
|
|
* safety in this scenario.
|
2011-02-03 05:09:28 -06:00
|
|
|
*/
|
2011-05-24 07:05:33 -05:00
|
|
|
cancelled = ddomain == NULL ? 1 : 0;
|
2011-02-03 05:09:28 -06:00
|
|
|
|
2011-05-23 11:48:36 -05:00
|
|
|
/* If finish3 set an error, and we don't have an earlier
|
|
|
|
* one we need to preserve it in case confirm3 overwrites
|
|
|
|
*/
|
|
|
|
if (!orig_err)
|
|
|
|
orig_err = virSaveLastError();
|
|
|
|
|
2011-02-03 05:09:28 -06:00
|
|
|
/*
|
|
|
|
* If cancelled, then src VM will be restarted, else
|
|
|
|
* it will be killed
|
|
|
|
*/
|
|
|
|
VIR_DEBUG("Confirm3 %p ret=%d vm=%p", sconn, ret, vm);
|
|
|
|
VIR_FREE(cookiein);
|
|
|
|
cookiein = cookieout;
|
|
|
|
cookieinlen = cookieoutlen;
|
|
|
|
cookieout = NULL;
|
|
|
|
cookieoutlen = 0;
|
|
|
|
ret = qemuMigrationConfirm(driver, sconn, vm,
|
|
|
|
cookiein, cookieinlen,
|
2011-05-19 06:48:15 -05:00
|
|
|
flags, cancelled);
|
2011-02-03 05:09:28 -06:00
|
|
|
/* If Confirm3 returns -1, there's nothing more we can
|
|
|
|
* do, but fortunately worst case is that there is a
|
|
|
|
* domain left in 'paused' state on source.
|
|
|
|
*/
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (ddomain) {
|
|
|
|
virUnrefDomain(ddomain);
|
|
|
|
ret = 0;
|
|
|
|
} else {
|
|
|
|
ret = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (st)
|
|
|
|
virUnrefStream(st);
|
|
|
|
|
|
|
|
if (orig_err) {
|
|
|
|
virSetError(orig_err);
|
|
|
|
virFreeError(orig_err);
|
|
|
|
}
|
|
|
|
VIR_FREE(uri_out);
|
|
|
|
VIR_FREE(cookiein);
|
|
|
|
VIR_FREE(cookieout);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
static int doPeer2PeerMigrate(struct qemud_driver *driver,
|
2011-04-20 07:48:58 -05:00
|
|
|
virConnectPtr sconn,
|
2011-01-31 04:47:03 -06:00
|
|
|
virDomainObjPtr vm,
|
2011-05-18 04:26:30 -05:00
|
|
|
const char *xmlin,
|
2011-05-18 08:18:53 -05:00
|
|
|
const char *dconnuri,
|
2011-01-31 04:47:03 -06:00
|
|
|
const char *uri,
|
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
2011-06-05 22:05:34 -05:00
|
|
|
unsigned long resource,
|
|
|
|
bool *v3proto)
|
2011-01-31 04:47:03 -06:00
|
|
|
{
|
|
|
|
int ret = -1;
|
|
|
|
virConnectPtr dconn = NULL;
|
|
|
|
bool p2p;
|
2011-05-20 05:03:04 -05:00
|
|
|
VIR_DEBUG("driver=%p, sconn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
|
|
|
|
"uri=%s, flags=%lu, dname=%s, resource=%lu",
|
|
|
|
driver, sconn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
|
|
|
|
NULLSTR(uri), flags, NULLSTR(dname), resource);
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
/* the order of operations is important here; we make sure the
|
|
|
|
* destination side is completely setup before we touch the source
|
|
|
|
*/
|
|
|
|
|
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
2011-05-18 08:18:53 -05:00
|
|
|
dconn = virConnectOpen(dconnuri);
|
2011-01-31 04:47:03 -06:00
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
|
|
|
if (dconn == NULL) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_FAILED,
|
|
|
|
_("Failed to connect to remote libvirt URI %s"), uri);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
|
|
|
p2p = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
|
|
|
|
VIR_DRV_FEATURE_MIGRATION_P2P);
|
2011-06-05 22:05:34 -05:00
|
|
|
/* v3proto reflects whether the caller used Perform3, but with
|
|
|
|
* p2p migrate, regardless of whether Perform2 or Perform3
|
|
|
|
* were used, we decide protocol based on what target supports
|
|
|
|
*/
|
|
|
|
*v3proto = VIR_DRV_SUPPORTS_FEATURE(dconn->driver, dconn,
|
|
|
|
VIR_DRV_FEATURE_MIGRATION_V3);
|
2011-01-31 04:47:03 -06:00
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
2011-02-03 05:09:28 -06:00
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
if (!p2p) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_FAILED, "%s",
|
|
|
|
_("Destination libvirt does not support peer-to-peer migration protocol"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* domain may have been stopped while we were talking to remote daemon */
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
2011-06-05 22:05:34 -05:00
|
|
|
if (*v3proto)
|
2011-05-18 04:26:30 -05:00
|
|
|
ret = doPeer2PeerMigrate3(driver, sconn, dconn, vm, xmlin,
|
2011-05-18 08:18:53 -05:00
|
|
|
dconnuri, uri, flags, dname, resource);
|
2011-02-03 05:09:28 -06:00
|
|
|
else
|
|
|
|
ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
|
2011-05-18 08:18:53 -05:00
|
|
|
dconnuri, flags, dname, resource);
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
/* don't call virConnectClose(), because that resets any pending errors */
|
|
|
|
qemuDomainObjEnterRemoteWithDriver(driver, vm);
|
|
|
|
virUnrefConnect(dconn);
|
|
|
|
qemuDomainObjExitRemoteWithDriver(driver, vm);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int qemuMigrationPerform(struct qemud_driver *driver,
|
|
|
|
virConnectPtr conn,
|
|
|
|
virDomainObjPtr vm,
|
2011-05-18 04:26:30 -05:00
|
|
|
const char *xmlin,
|
2011-05-18 08:18:53 -05:00
|
|
|
const char *dconnuri,
|
2011-01-31 04:47:03 -06:00
|
|
|
const char *uri,
|
2011-01-24 12:06:16 -06:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
2011-01-31 04:47:03 -06:00
|
|
|
unsigned long flags,
|
|
|
|
const char *dname,
|
2011-02-03 05:09:28 -06:00
|
|
|
unsigned long resource,
|
2011-05-23 07:50:11 -05:00
|
|
|
bool v3proto)
|
2011-01-31 04:47:03 -06:00
|
|
|
{
|
|
|
|
virDomainEventPtr event = NULL;
|
|
|
|
int ret = -1;
|
|
|
|
int resume = 0;
|
|
|
|
qemuDomainObjPrivatePtr priv = vm->privateData;
|
2011-05-20 05:03:04 -05:00
|
|
|
VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
|
|
|
|
"uri=%s, cookiein=%s, cookieinlen=%d, cookieout=%p, "
|
|
|
|
"cookieoutlen=%p, flags=%lu, dname=%s, resource=%lu, v3proto=%d",
|
|
|
|
driver, conn, vm, NULLSTR(xmlin), NULLSTR(dconnuri),
|
|
|
|
NULLSTR(uri), NULLSTR(cookiein), cookieinlen,
|
|
|
|
cookieout, cookieoutlen, flags, NULLSTR(dname),
|
|
|
|
resource, v3proto);
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
priv->jobActive = QEMU_JOB_MIGRATION_OUT;
|
|
|
|
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("domain is not running"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
|
|
|
memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
|
|
|
|
priv->jobInfo.type = VIR_DOMAIN_JOB_UNBOUNDED;
|
|
|
|
|
2011-05-04 04:07:01 -05:00
|
|
|
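    /* Remember whether the guest was running so it can be resumed if the
     * migration fails below */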
resume = virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING;
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
|
2011-01-24 12:06:16 -06:00
|
|
|
if (cookieinlen) {
|
|
|
|
qemuReportError(VIR_ERR_OPERATION_INVALID,
|
|
|
|
"%s", _("received unexpected cookie with P2P migration"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2011-05-18 04:26:30 -05:00
|
|
|
if (doPeer2PeerMigrate(driver, conn, vm, xmlin,
|
2011-06-05 22:05:34 -05:00
|
|
|
dconnuri, uri, flags, dname,
|
|
|
|
resource, &v3proto) < 0)
|
2011-01-31 04:47:03 -06:00
|
|
|
/* doPeer2PeerMigrate already set the error, so just get out */
|
|
|
|
goto endjob;
|
|
|
|
} else {
|
2011-05-18 08:18:53 -05:00
|
|
|
if (dconnuri) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("Unexpected dconnuri parameter with non-peer2peer migration"));
|
|
|
|
goto endjob;
|
|
|
|
}
|
2011-01-24 12:06:16 -06:00
|
|
|
if (doNativeMigrate(driver, vm, uri, cookiein, cookieinlen,
|
|
|
|
cookieout, cookieoutlen,
|
|
|
|
flags, dname, resource) < 0)
|
2011-01-31 04:47:03 -06:00
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
2011-05-23 07:50:11 -05:00
|
|
|
/*
|
|
|
|
* In v3 protocol, the source VM is not killed off until the
|
|
|
|
* confirm step.
|
|
|
|
*/
|
|
|
|
if (v3proto) {
|
|
|
|
resume = 0;
|
|
|
|
} else {
|
2011-02-03 05:09:28 -06:00
|
|
|
qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_MIGRATED);
|
|
|
|
qemuAuditDomainStop(vm, "migrated");
|
|
|
|
resume = 0;
|
2011-01-31 04:47:03 -06:00
|
|
|
|
2011-02-03 05:09:28 -06:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
|
2011-01-31 04:47:03 -06:00
|
|
|
}
|
2011-01-24 12:06:16 -06:00
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
endjob:
|
2011-05-04 04:07:01 -05:00
|
|
|
if (resume && virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
|
2011-01-31 04:47:03 -06:00
|
|
|
/* we got here through some sort of failure; start the domain again */
|
2011-05-04 04:07:01 -05:00
|
|
|
if (qemuProcessStartCPUs(driver, vm, conn,
|
|
|
|
VIR_DOMAIN_RUNNING_MIGRATION_CANCELED) < 0) {
|
2011-01-31 04:47:03 -06:00
|
|
|
/* Hm, we already know we are in error here. We don't want to
|
|
|
|
* overwrite the previous error, though, so we just throw something
|
|
|
|
* to the logs and hope for the best
|
|
|
|
*/
|
|
|
|
VIR_ERROR(_("Failed to resume guest %s after failure"),
|
|
|
|
vm->def->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
|
|
|
|
}
|
2011-05-19 06:48:15 -05:00
|
|
|
if (vm) {
|
|
|
|
if (qemuDomainObjEndJob(vm) == 0) {
|
|
|
|
vm = NULL;
|
|
|
|
} else if (!virDomainObjIsActive(vm) &&
|
|
|
|
(!vm->persistent || (flags & VIR_MIGRATE_UNDEFINE_SOURCE))) {
|
|
|
|
if (flags & VIR_MIGRATE_UNDEFINE_SOURCE)
|
|
|
|
virDomainDeleteConfig(driver->configDir, driver->autostartDir, vm);
|
|
|
|
virDomainRemoveInactive(&driver->domains, vm);
|
|
|
|
vm = NULL;
|
|
|
|
}
|
|
|
|
}
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
#if WITH_MACVTAP
|
|
|
|
static void
|
|
|
|
qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def) {
|
|
|
|
int i;
|
|
|
|
int last_good_net = -1;
|
|
|
|
virDomainNetDefPtr net;
|
|
|
|
|
|
|
|
for (i = 0; i < def->nnets; i++) {
|
|
|
|
net = def->nets[i];
|
|
|
|
if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
|
|
|
|
if (vpAssociatePortProfileId(net->ifname,
|
|
|
|
net->mac,
|
|
|
|
net->data.direct.linkdev,
|
|
|
|
&net->data.direct.virtPortProfile,
|
|
|
|
def->uuid,
|
|
|
|
VIR_VM_OP_MIGRATE_IN_FINISH) != 0)
|
|
|
|
goto err_exit;
|
|
|
|
}
|
|
|
|
last_good_net = i;
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
err_exit:
|
|
|
|
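    /* Failure: roll back the port profile associations that already succeeded */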
for (i = 0; i < last_good_net; i++) {
|
|
|
|
net = def->nets[i];
|
|
|
|
if (net->type == VIR_DOMAIN_NET_TYPE_DIRECT) {
|
|
|
|
vpDisassociatePortProfileId(net->ifname,
|
|
|
|
net->mac,
|
|
|
|
net->data.direct.linkdev,
|
|
|
|
&net->data.direct.virtPortProfile,
|
|
|
|
VIR_VM_OP_MIGRATE_IN_FINISH);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#else /* !WITH_MACVTAP */
|
|
|
|
static void
|
|
|
|
qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def ATTRIBUTE_UNUSED) { }
|
|
|
|
#endif /* WITH_MACVTAP */
|
|
|
|
|
|
|
|
|
|
|
|
virDomainPtr
|
|
|
|
qemuMigrationFinish(struct qemud_driver *driver,
|
|
|
|
virConnectPtr dconn,
|
|
|
|
virDomainObjPtr vm,
|
2011-01-24 12:06:16 -06:00
|
|
|
const char *cookiein,
|
|
|
|
int cookieinlen,
|
|
|
|
char **cookieout,
|
|
|
|
int *cookieoutlen,
|
2011-01-31 04:47:03 -06:00
|
|
|
unsigned long flags,
|
2011-05-23 07:50:11 -05:00
|
|
|
int retcode,
|
|
|
|
bool v3proto)
|
2011-01-31 04:47:03 -06:00
|
|
|
{
|
|
|
|
virDomainPtr dom = NULL;
|
|
|
|
virDomainEventPtr event = NULL;
|
|
|
|
int newVM = 1;
|
|
|
|
qemuDomainObjPrivatePtr priv = NULL;
|
2011-01-24 12:06:16 -06:00
|
|
|
qemuMigrationCookiePtr mig = NULL;
|
2011-05-20 05:03:04 -05:00
|
|
|
VIR_DEBUG("driver=%p, dconn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
|
|
|
|
"cookieout=%p, cookieoutlen=%p, flags=%lu, retcode=%d",
|
|
|
|
driver, dconn, vm, NULLSTR(cookiein), cookieinlen,
|
|
|
|
cookieout, cookieoutlen, flags, retcode);
|
2011-05-23 11:48:36 -05:00
|
|
|
virErrorPtr orig_err = NULL;
|
2011-01-31 04:47:03 -06:00
|
|
|
|
|
|
|
priv = vm->privateData;
|
|
|
|
if (priv->jobActive != QEMU_JOB_MIGRATION_IN) {
|
|
|
|
qemuReportError(VIR_ERR_NO_DOMAIN,
|
|
|
|
_("domain '%s' is not processing incoming migration"), vm->def->name);
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
priv->jobActive = QEMU_JOB_NONE;
|
|
|
|
memset(&priv->jobInfo, 0, sizeof(priv->jobInfo));
|
|
|
|
|
2011-05-18 11:34:21 -05:00
|
|
|
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
|
2011-01-24 12:06:16 -06:00
|
|
|
goto cleanup;
|
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
if (qemuDomainObjBeginJobWithDriver(driver, vm) < 0)
|
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* Did the migration go as planned? If yes, return the domain
|
|
|
|
* object, but if no, clean up the empty qemu process.
|
|
|
|
*/
|
|
|
|
if (retcode == 0) {
|
|
|
|
if (!virDomainObjIsActive(vm)) {
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
|
|
|
|
_("guest unexpectedly quit"));
|
|
|
|
goto cleanup;
|
|
|
|
}
|
|
|
|
|
|
|
|
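        /* Re-associate any macvtap/direct interfaces with their port
         * profiles on this (destination) host */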
qemuMigrationVPAssociatePortProfiles(vm->def);
|
|
|
|
|
|
|
|
if (flags & VIR_MIGRATE_PERSIST_DEST) {
|
|
|
|
if (vm->persistent)
|
|
|
|
newVM = 0;
|
|
|
|
vm->persistent = 1;
|
|
|
|
|
|
|
|
if (virDomainSaveConfig(driver->configDir, vm->def) < 0) {
|
|
|
|
/* Hmpf. Migration was successful, but making it persistent
|
|
|
|
* was not. If we report successful, then when this domain
|
|
|
|
* shuts down, management tools are in for a surprise. On the
|
|
|
|
* other hand, if we report failure, then the management tools
|
|
|
|
* might try to restart the domain on the source side, even
|
|
|
|
* though the domain is actually running on the destination.
|
|
|
|
* Return a NULL dom pointer, and hope that this is a rare
|
|
|
|
* situation and management tools are smart.
|
|
|
|
*/
|
|
|
|
vm = NULL;
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED,
|
|
|
|
newVM ?
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED_ADDED :
|
|
|
|
VIR_DOMAIN_EVENT_DEFINED_UPDATED);
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
event = NULL;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(flags & VIR_MIGRATE_PAUSED)) {
|
|
|
|
/* run 'cont' on the destination, which allows migration on qemu
|
|
|
|
* >= 0.10.6 to work properly. This isn't strictly necessary on
|
|
|
|
* older qemu's, but it also doesn't hurt anything there
|
|
|
|
*/
|
2011-05-04 04:07:01 -05:00
|
|
|
if (qemuProcessStartCPUs(driver, vm, dconn,
|
|
|
|
VIR_DOMAIN_RUNNING_MIGRATED) < 0) {
|
2011-05-23 11:48:36 -05:00
|
|
|
if (virGetLastError() == NULL)
|
|
|
|
qemuReportError(VIR_ERR_INTERNAL_ERROR,
|
|
|
|
"%s", _("resume operation failed"));
|
|
|
|
/* Need to save the current error, in case shutting
|
|
|
|
* down the process overwrites it
|
|
|
|
*/
|
|
|
|
orig_err = virSaveLastError();
|
|
|
|
|
2011-05-23 07:50:11 -05:00
|
|
|
/*
|
|
|
|
* In v3 protocol, the source VM is still available to
|
|
|
|
* restart during confirm() step, so we kill it off
|
|
|
|
* now.
|
|
|
|
* In v2 protocol, the source is dead, so we leave
|
|
|
|
* target in paused state, in case admin can fix
|
|
|
|
* things up
|
|
|
|
*/
|
|
|
|
if (v3proto) {
|
|
|
|
qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_FAILED);
|
|
|
|
qemuAuditDomainStop(vm, "failed");
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_FAILED);
|
|
|
|
if (!vm->persistent) {
|
|
|
|
if (qemuDomainObjEndJob(vm) > 0)
|
|
|
|
virDomainRemoveInactive(&driver->domains, vm);
|
|
|
|
vm = NULL;
|
|
|
|
}
|
|
|
|
}
|
2011-01-31 04:47:03 -06:00
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-05-23 07:50:11 -05:00
|
|
|
dom = virGetDomain(dconn, vm->def->name, vm->def->uuid);
|
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED,
|
|
|
|
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
|
2011-05-04 04:07:01 -05:00
|
|
|
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_PAUSED) {
|
|
|
|
virDomainObjSetState(vm, VIR_DOMAIN_PAUSED, VIR_DOMAIN_PAUSED_USER);
|
2011-01-31 04:47:03 -06:00
|
|
|
qemuDomainEventQueue(driver, event);
|
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED,
|
|
|
|
VIR_DOMAIN_EVENT_SUSPENDED_PAUSED);
|
|
|
|
}
|
|
|
|
if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
|
|
|
|
VIR_WARN("Failed to save status on vm %s", vm->def->name);
|
|
|
|
goto endjob;
|
|
|
|
}
|
|
|
|
} else {
|
2011-05-04 04:07:01 -05:00
|
|
|
qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_FAILED);
|
2011-03-08 15:28:51 -06:00
|
|
|
qemuAuditDomainStop(vm, "failed");
|
2011-01-31 04:47:03 -06:00
|
|
|
event = virDomainEventNewFromObj(vm,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED,
|
|
|
|
VIR_DOMAIN_EVENT_STOPPED_FAILED);
|
|
|
|
if (!vm->persistent) {
|
|
|
|
if (qemuDomainObjEndJob(vm) > 0)
|
|
|
|
virDomainRemoveInactive(&driver->domains, vm);
|
|
|
|
vm = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-24 12:06:16 -06:00
|
|
|
if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, 0) < 0)
|
|
|
|
VIR_WARN("Unable to encode migration cookie");
|
|
|
|
|
2011-01-31 04:47:03 -06:00
|
|
|
endjob:
|
|
|
|
if (vm &&
|
|
|
|
qemuDomainObjEndJob(vm) == 0)
|
|
|
|
vm = NULL;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
if (vm)
|
|
|
|
virDomainObjUnlock(vm);
|
|
|
|
if (event)
|
|
|
|
qemuDomainEventQueue(driver, event);
|
2011-01-24 12:06:16 -06:00
|
|
|
qemuMigrationCookieFree(mig);
|
2011-05-23 11:48:36 -05:00
|
|
|
if (orig_err) {
|
|
|
|
virSetError(orig_err);
|
|
|
|
virFreeError(orig_err);
|
|
|
|
}
|
2011-01-31 04:47:03 -06:00
|
|
|
return dom;
|
|
|
|
}
|
int qemuMigrationConfirm(struct qemud_driver *driver,
                         virConnectPtr conn,
                         virDomainObjPtr vm,
                         const char *cookiein,
                         int cookieinlen,
                         unsigned int flags ATTRIBUTE_UNUSED,
                         int retcode)
{
    qemuMigrationCookiePtr mig;
    virDomainEventPtr event = NULL;
    int rv = -1;
    VIR_DEBUG("driver=%p, conn=%p, vm=%p, cookiein=%s, cookieinlen=%d, "
              "flags=%u, retcode=%d",
              driver, conn, vm, NULLSTR(cookiein), cookieinlen,
              flags, retcode);

    if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
        return -1;

    if (!virDomainObjIsActive(vm)) {
        qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                        _("guest unexpectedly quit"));
        goto cleanup;
    }

    /* Did the migration go as planned?  If yes, kill off the
     * domain object, but if no, resume CPUs
     */
    if (retcode == 0) {
        qemuProcessStop(driver, vm, 1, VIR_DOMAIN_SHUTOFF_MIGRATED);
        qemuAuditDomainStop(vm, "migrated");

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_STOPPED,
                                         VIR_DOMAIN_EVENT_STOPPED_MIGRATED);
    } else {

        /* The migration failed, so run 'cont' to resume the guest here
         * on the source.  This isn't strictly necessary on all qemu
         * versions, but it also doesn't hurt anything.
         */
        if (qemuProcessStartCPUs(driver, vm, conn,
                                 VIR_DOMAIN_RUNNING_MIGRATED) < 0) {
            if (virGetLastError() == NULL)
                qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                "%s", _("resume operation failed"));
            goto cleanup;
        }

        event = virDomainEventNewFromObj(vm,
                                         VIR_DOMAIN_EVENT_RESUMED,
                                         VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
        if (virDomainSaveStatus(driver->caps, driver->stateDir, vm) < 0) {
            VIR_WARN("Failed to save status on vm %s", vm->def->name);
            goto cleanup;
        }
    }

    qemuMigrationCookieFree(mig);
    rv = 0;

cleanup:
    if (event)
        qemuDomainEventQueue(driver, event);
    return rv;
}
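
/* Caller sketch (illustrative assumption, not part of this file): the
 * source-side Confirm3 driver entry point is expected to invoke this
 * roughly as
 *
 *     ret = qemuMigrationConfirm(driver, conn, vm,
 *                                cookiein, cookieinlen,
 *                                flags, cancelled ? 1 : 0);
 *
 * so that a cancelled or failed migration (non-zero retcode) leaves the
 * source domain running. */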

/* Helper function called while driver lock is held and vm is active. */
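/*
 * Descriptive summary inferred from the code below (not upstream API
 * documentation): the guest's memory image is streamed to 'fd', which the
 * caller is expected to have opened on 'path' with any header already
 * written, so 'offset' marks where the image starts.  If 'compressor' is
 * non-NULL it names an external program (invoked with "-c") that the
 * stream is piped through first.  When 'is_reg' is false, 'path' is a
 * block device and must be allowed in the domain's devices cgroup before
 * qemu may write to it via the exec: fallback.  'bypassSecurityDriver'
 * skips the save-state relabelling of 'path'.
 */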
int
qemuMigrationToFile(struct qemud_driver *driver, virDomainObjPtr vm,
                    int fd, off_t offset, const char *path,
                    const char *compressor,
                    bool is_reg, bool bypassSecurityDriver)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    virCgroupPtr cgroup = NULL;
    int ret = -1;
    int rc;
    bool restoreLabel = false;
    virCommandPtr cmd = NULL;
    int pipeFD[2] = { -1, -1 };

    if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
        (!compressor || pipe(pipeFD) == 0)) {
        /* All right! We can use fd migration, which means that qemu
         * doesn't have to open() the file, so while we still have to
         * grant SELinux access, we can do it on fd and avoid cleanup
         * later, as well as skip futzing with cgroup. */
        if (virSecurityManagerSetFDLabel(driver->securityManager, vm,
                                         compressor ? pipeFD[1] : fd) < 0)
            goto cleanup;
        bypassSecurityDriver = true;
    } else {
        /* Phooey - we have to fall back on exec migration, where qemu
         * has to popen() the file by name.  We might also stumble on
         * a race present in some qemu versions where it does a wait()
         * that botches pclose. */
        if (!is_reg &&
            qemuCgroupControllerActive(driver,
                                       VIR_CGROUP_CONTROLLER_DEVICES)) {
            if (virCgroupForDomain(driver->cgroup, vm->def->name,
                                   &cgroup, 0) != 0) {
                qemuReportError(VIR_ERR_INTERNAL_ERROR,
                                _("Unable to find cgroup for %s"),
                                vm->def->name);
                goto cleanup;
            }
            rc = virCgroupAllowDevicePath(cgroup, path,
                                          VIR_CGROUP_DEVICE_RW);
            qemuAuditCgroupPath(vm, cgroup, "allow", path, "rw", rc);
            if (rc < 0) {
                virReportSystemError(-rc,
                                     _("Unable to allow device %s for %s"),
                                     path, vm->def->name);
                goto cleanup;
            }
        }
        if ((!bypassSecurityDriver) &&
            virSecurityManagerSetSavedStateLabel(driver->securityManager,
                                                 vm, path) < 0)
            goto cleanup;
        restoreLabel = true;
    }
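
    /* Ask qemu to start the transfer on the monitor.  Where fd passing is
     * available, the (possibly pipe-backed) descriptor is handed to qemu
     * directly; otherwise the exec: fallback is used, running either plain
     * "cat" or the requested compressor. */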
    qemuDomainObjEnterMonitorWithDriver(driver, vm);
    if (!compressor) {
        const char *args[] = { "cat", NULL };

        if (qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD) &&
            priv->monConfig->type == VIR_DOMAIN_CHR_TYPE_UNIX) {
            rc = qemuMonitorMigrateToFd(priv->mon,
                                        QEMU_MONITOR_MIGRATE_BACKGROUND,
                                        fd);
        } else {
            rc = qemuMonitorMigrateToFile(priv->mon,
                                          QEMU_MONITOR_MIGRATE_BACKGROUND,
                                          args, path, offset);
        }
    } else {
        const char *prog = compressor;
        const char *args[] = {
            prog,
            "-c",
            NULL
        };
        if (pipeFD[0] != -1) {
            cmd = virCommandNewArgs(args);
            virCommandSetInputFD(cmd, pipeFD[0]);
            virCommandSetOutputFD(cmd, &fd);
            if (virSetCloseExec(pipeFD[1]) < 0) {
                virReportSystemError(errno, "%s",
                                     _("Unable to set cloexec flag"));
                qemuDomainObjExitMonitorWithDriver(driver, vm);
                goto cleanup;
            }
            if (virCommandRunAsync(cmd, NULL) < 0) {
                qemuDomainObjExitMonitorWithDriver(driver, vm);
                goto cleanup;
            }
            rc = qemuMonitorMigrateToFd(priv->mon,
                                        QEMU_MONITOR_MIGRATE_BACKGROUND,
                                        pipeFD[1]);
            if (VIR_CLOSE(pipeFD[0]) < 0 ||
                VIR_CLOSE(pipeFD[1]) < 0)
                VIR_WARN("failed to close intermediate pipe");
        } else {
            rc = qemuMonitorMigrateToFile(priv->mon,
                                          QEMU_MONITOR_MIGRATE_BACKGROUND,
                                          args, path, offset);
        }
    }
    qemuDomainObjExitMonitorWithDriver(driver, vm);

    if (rc < 0)
        goto cleanup;
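
    /* The 'migrate' command above runs in the background; block here
     * until qemu reports the transfer as complete (or failed). */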
    rc = qemuMigrationWaitForCompletion(driver, vm);

    if (rc < 0)
        goto cleanup;

    if (cmd && virCommandWait(cmd, NULL) < 0)
        goto cleanup;

    ret = 0;

cleanup:
    VIR_FORCE_CLOSE(pipeFD[0]);
    VIR_FORCE_CLOSE(pipeFD[1]);
    virCommandFree(cmd);
    if (restoreLabel && (!bypassSecurityDriver) &&
        virSecurityManagerRestoreSavedStateLabel(driver->securityManager,
                                                 vm, path) < 0)
        VIR_WARN("failed to restore save state label on %s", path);

    if (cgroup != NULL) {
        rc = virCgroupDenyDevicePath(cgroup, path,
                                     VIR_CGROUP_DEVICE_RWM);
        qemuAuditCgroupPath(vm, cgroup, "deny", path, "rwm", rc);
        if (rc < 0)
            VIR_WARN("Unable to deny device %s for %s %d",
                     path, vm->def->name, rc);
        virCgroupFree(&cgroup);
    }
    return ret;
}
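
/* Usage sketch (illustrative assumption; the real callers live elsewhere in
 * the qemu driver): a save-to-file path that has already opened the
 * destination file and written its header might do roughly
 *
 *     if (qemuMigrationToFile(driver, vm, fd, header_len, path,
 *                             compressor, is_reg, false) < 0)
 *         goto endjob;
 *
 * where 'header_len', 'compressor' and 'is_reg' are hypothetical caller
 * variables: the offset of the memory image within the file, an optional
 * external filter such as "gzip", and whether 'path' is a regular file.
 */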