asyncjob: Simplify error reporting

parent a1e54f8476
commit d0d20148f7
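The change below reworks how vmmAsyncJob reports failures: callbacks now receive the asyncjob object as their first argument, a module-level cb_wrapper() catches anything the callback raises and records it with asyncjob.set_error(), and run() hands the resulting (error, details) pair straight back to the caller instead of requiring a separate get_error() call. A minimal sketch of the resulting calling pattern, using the names that appear in the diff below; the dialog class, callback body, helper function, and message strings here are illustrative, not taken from virt-manager:

# Sketch only: SomeDialog, _async_do_work, do_long_running_operation and the
# message strings are made up; vmmAsyncJob, vmmCreateMeter, run() and
# show_err() follow the usage shown in the diff below.
class SomeDialog(vmmGObjectUI):
    def _async_do_work(self, asyncjob, arg):
        # Callbacks now take the asyncjob as their first argument and simply
        # raise on failure; cb_wrapper() turns any exception into
        # asyncjob.set_error(error, details) behind the scenes.
        meter = vmmCreateMeter(asyncjob)
        do_long_running_operation(arg, meter=meter)   # illustrative helper

    def start_work(self, arg):
        progWin = vmmAsyncJob(self._async_do_work, [arg],
                              _("Doing work..."),
                              _("This may take a while..."))
        error, details = progWin.run()
        if error:
            error = _("Operation failed: %s") % error
            self.err.show_err(error, error + "\n" + details)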
@@ -989,25 +989,35 @@ class vmmAddHardware(vmmGObjectUI):
     ######################
 
     def setup_device(self):
-        if (self._dev.virtual_device_type ==
+        if (self._dev.virtual_device_type !=
             virtinst.VirtualDevice.VIRTUAL_DEV_DISK):
-            progWin = vmmAsyncJob(self.do_file_allocate,
-                                  [self._dev],
-                                  title=_("Creating Storage File"),
-                                  text=_("Allocation of disk storage may take "
-                                         "a few minutes to complete."))
-            progWin.run()
-
-            error, details = progWin.get_error()
-            if error:
-                return (error, details)
-
-        else:
             self._dev.setup_dev(self.conn.vmm)
+            return
+
+        def do_file_allocate(asyncjob, disk):
+            meter = vmmCreateMeter(asyncjob)
+
+            # If creating disk via storage API, we need to thread
+            # off a new connection
+            if disk.vol_install:
+                newconn = util.dup_lib_conn(disk.conn)
+                disk.conn = newconn
+            logging.debug("Starting background file allocate process")
+            disk.setup_dev(self.conn.vmm, meter=meter)
+            logging.debug("Allocation completed")
+
+        progWin = vmmAsyncJob(do_file_allocate,
+                              [self._dev],
+                              title=_("Creating Storage File"),
+                              text=_("Allocation of disk storage may take "
+                                     "a few minutes to complete."))
+
+        return progWin.run()
 
     def add_device(self):
         ret = self.setup_device()
-        if ret:
+        if ret and ret[0]:
             # Encountered an error
             return (True, ret)
 
@@ -1046,24 +1056,6 @@ class vmmAddHardware(vmmGObjectUI):
 
         return (False, None)
 
-    def do_file_allocate(self, disk, asyncjob):
-        meter = vmmCreateMeter(asyncjob)
-        newconn = None
-        try:
-            # If creating disk via storage API, we need to thread
-            # off a new connection
-            if disk.vol_install:
-                newconn = util.dup_lib_conn(disk.conn)
-                disk.conn = newconn
-            logging.debug("Starting background file allocate process")
-            disk.setup_dev(self.conn.vmm, meter=meter)
-            logging.debug("Allocation completed")
-        except Exception, e:
-            details = (_("Unable to complete install: '%s'") %
-                       "".join(traceback.format_exc()))
-            error = _("Unable to complete install: '%s'") % str(e)
-            asyncjob.set_error(error, details)
 
     ###########################
     # Page validation methods #
@@ -20,6 +20,8 @@
 
 import logging
 import threading
+import traceback
+
 import gtk
 import gobject
 
@@ -30,38 +32,35 @@ from virtManager.baseclass import vmmGObjectUI
 # code in the run() method every now & then
 class asyncJobWorker(threading.Thread):
     def __init__(self, callback, args):
-        threading.Thread.__init__(self, target=callback, args=args)
+        args = [callback] + args
+        threading.Thread.__init__(self, target=cb_wrapper, args=args)
 
     def run(self):
         threading.Thread.run(self)
 
+def cb_wrapper(callback, asyncjob, *args, **kwargs):
+    try:
+        callback(asyncjob, *args, **kwargs)
+    except Exception, e:
+        asyncjob.set_error(e, "".join(traceback.format_exc()))
+
 # Displays a progress bar while executing the "callback" method.
 class vmmAsyncJob(vmmGObjectUI):
 
-    def __init__(self, callback, args=None,
-                 text=_("Please wait a few moments..."),
-                 title=_("Operation in progress"),
+    def __init__(self, callback, args, title, text,
                  run_main=True, cancel_back=None, cancel_args=None):
         vmmGObjectUI.__init__(self, "vmm-progress.glade", "vmm-progress")
 
-        self.topwin.set_title(title)
-
-        self.window.signal_autoconnect({
-            "on_async_job_delete_event" : self.delete,
-            "on_async_job_cancel_clicked" : self.cancel,
-        })
-
         self.run_main = bool(run_main)
         self.cancel_job = cancel_back
         self.cancel_args = cancel_args or []
-        self.cancel_args.append(self)
+        self.cancel_args = [self] + self.cancel_args
        if self.cancel_job:
            self.window.get_widget("cancel-async-job").show()
        else:
            self.window.get_widget("cancel-async-job").hide()
        self.job_canceled = False
 
-        # Callback sets this if there is an error
         self._error_info = None
         self._data = None
 
@@ -69,11 +68,18 @@ class vmmAsyncJob(vmmGObjectUI):
         self.pbar = self.window.get_widget("pbar")
         self.window.get_widget("pbar-text").set_text(text)
 
-        args.append(self)
+        args = [self] + args
         self.bg_thread = asyncJobWorker(callback, args)
         self.bg_thread.setDaemon(True)
         self.is_pulsing = True
 
+        self.window.signal_autoconnect({
+            "on_async_job_delete_event" : self.delete,
+            "on_async_job_cancel_clicked" : self.cancel,
+        })
+
+        self.topwin.set_title(title)
+
     def run(self):
         timer = util.safe_timeout_add(100, self.exit_if_necessary)
         self.topwin.present()
@@ -98,6 +104,7 @@ class vmmAsyncJob(vmmGObjectUI):
         self.exit_if_necessary(force_exit=True)
 
         self.topwin.destroy()
+        return self._get_error()
 
     def delete(self, ignore1=None, ignore2=None):
         thread_active = (self.bg_thread.isAlive() or not self.run_main)
@@ -175,7 +182,7 @@ class vmmAsyncJob(vmmGObjectUI):
     def set_error(self, error, details):
         self._error_info = (error, details)
 
-    def get_error(self):
+    def _get_error(self):
         if not self._error_info:
             return (None, None)
         return self._error_info
@@ -38,7 +38,7 @@ class vmmGObject(gobject.GObject):
 
     def get_hal_helper(self):
         from virtManager import halhelper
-        return virtManager.halhelper.get_hal_helper()
+        return halhelper.get_hal_helper()
 
 class vmmGObjectUI(vmmGObject):
     def __init__(self, filename, windowname):
@@ -709,51 +709,39 @@ class vmmCloneVM(vmmGObjectUI):
         if self.clone_design.clone_devices:
             text = title + _(" and selected storage (this may take a while)")
 
-        progWin = vmmAsyncJob(self._async_clone, [],
-                              title=title, text=text)
-        progWin.run()
-        error, details = progWin.get_error()
+        progWin = vmmAsyncJob(self._async_clone, [], title, text)
+        error, details = progWin.run()
 
         self.topwin.set_sensitive(True)
         self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.TOP_LEFT_ARROW))
 
         if error is not None:
-            self.err.show_err(error, details)
+            error = (_("Error creating virtual machine clone '%s': %s") %
+                     (self.clone_design.clone_name, error))
+            self.err.show_err(error, error + "\n" + details)
         else:
             self.close()
             self.conn.tick(noStatsUpdate=True)
 
     def _async_clone(self, asyncjob):
         newconn = None
-        error = None
-        details = None
 
         try:
-            try:
-                self.orig_vm.set_cloning(True)
+            self.orig_vm.set_cloning(True)
 
             # Open a seperate connection to install on since this is async
             logging.debug("Threading off connection to clone VM.")
             newconn = util.dup_conn(self.conn).vmm
             meter = vmmCreateMeter(asyncjob)
 
             self.clone_design.orig_connection = newconn
             for d in self.clone_design.clone_virtual_disks:
                 d.conn = newconn
 
             self.clone_design.setup()
             CloneManager.start_duplicate(self.clone_design, meter)
         finally:
             self.orig_vm.set_cloning(False)
 
-        except Exception, e:
-            error = (_("Error creating virtual machine clone '%s': %s") %
-                     (self.clone_design.clone_name, str(e)))
-            details = "".join(traceback.format_exc())
-
-        if error:
-            asyncjob.set_error(error, details)
-            return
 
     def change_storage_browse(self, ignore):
 
@@ -21,7 +21,6 @@
 import gobject
 import gtk
 
-import sys
 import time
 import traceback
 import threading
@@ -1583,16 +1582,14 @@ class vmmCreate(vmmGObjectUI):
                                      "and retrieval of the installation "
                                      "images may take a few minutes to "
                                      "complete."))
-        progWin.run()
-        error, details = progWin.get_error()
+        error, details = progWin.run()
 
-        if error != None:
-            self.err.show_err(error, details)
-
         self.topwin.set_sensitive(True)
         self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.TOP_LEFT_ARROW))
 
         if error:
+            error = (_("Unable to complete install: '%s'") % error)
+            self.err.show_err(error, error + "\n" + details)
             self.failed_guest = self.guest
             return
 
@@ -1610,51 +1607,33 @@ class vmmCreate(vmmGObjectUI):
         self.close()
 
 
-    def do_install(self, guest, asyncjob):
+    def do_install(self, asyncjob, guest):
         meter = vmmCreateMeter(asyncjob)
-        error = None
-        details = None
-        try:
-            logging.debug("Starting background install process")
-
-            guest.conn = util.dup_conn(self.conn).vmm
-            for dev in guest.get_all_devices():
-                dev.conn = guest.conn
-
-            dom = guest.start_install(False, meter = meter)
-            if dom == None:
-                error = _("Guest installation failed to complete")
-                details = error
-                logging.error("Guest install did not return a domain")
-            else:
-                logging.debug("Install completed")
-
-            # Make sure we pick up the domain object
-            self.conn.tick(noStatsUpdate=True)
-            vm = self.conn.get_vm(guest.uuid)
-
-            if vm.is_shutoff():
-                # Domain is already shutdown, but no error was raised.
-                # Probably means guest had no 'install' phase, as in
-                # for live cds. Try to restart the domain.
-                vm.startup()
-            else:
-                # Register a status listener, which will restart the
-                # guest after the install has finished
-                util.connect_opt_out(vm, "status-changed",
-                                     self.check_install_status, guest)
-
-        except:
-            (_type, value, stacktrace) = sys.exc_info ()
-
-            # Detailed error message, in English so it can be Googled.
-            details = ("Unable to complete install '%s'" %
-                       (str(_type) + " " + str(value) + "\n" +
-                        traceback.format_exc (stacktrace)))
-            error = (_("Unable to complete install: '%s'") % str(value))
-
-        if error:
-            asyncjob.set_error(error, details)
+
+        logging.debug("Starting background install process")
+
+        guest.conn = util.dup_conn(self.conn).vmm
+        for dev in guest.get_all_devices():
+            dev.conn = guest.conn
+
+        guest.start_install(False, meter = meter)
+        logging.debug("Install completed")
+
+        # Make sure we pick up the domain object
+        self.conn.tick(noStatsUpdate=True)
+        vm = self.conn.get_vm(guest.uuid)
+
+        if vm.is_shutoff():
+            # Domain is already shutdown, but no error was raised.
+            # Probably means guest had no 'install' phase, as in
+            # for live cds. Try to restart the domain.
+            vm.startup()
+        else:
+            # Register a status listener, which will restart the
+            # guest after the install has finished
+            util.connect_opt_out(vm, "status-changed",
+                                 self.check_install_status, guest)
 
     def check_install_status(self, vm, ignore1, ignore2, virtinst_guest=None):
         if vm.is_crashed():
@@ -21,7 +21,6 @@
 import gobject
 import gtk
 
-import sys
 import traceback
 import logging
 
@@ -1115,44 +1114,28 @@ class vmmCreateInterface(vmmGObjectUI):
                               title=_("Creating virtual interface"),
                               text=_("The virtual interface is now being "
                                      "created."))
-        progWin.run()
-        error, details = progWin.get_error()
+        error, details = progWin.run()
 
-        if error != None:
-            self.err.show_err(error, details)
-
         self.topwin.set_sensitive(True)
         self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.TOP_LEFT_ARROW))
 
         if error:
-            return
+            error = _("Error creating interface: '%s'") % error
+            self.err.show_err(error, error + "\n" + details)
+        else:
+            # FIXME: Hmm, shouldn't we emit a signal here rather than do this?
+            self.conn.tick(noStatsUpdate=True)
+            self.close()
 
-        # FIXME: Hmm, shouldn't we emit a signal here rather than do this?
-        self.conn.tick(noStatsUpdate=True)
-        self.close()
-
-
-    def do_install(self, activate, asyncjob):
+    def do_install(self, asyncjob, activate):
         meter = vmmCreateMeter(asyncjob)
         error = None
         details = None
-        try:
-            self.interface.conn = util.dup_conn(self.conn).vmm
 
-            self.interface.install(meter, create=activate)
-            logging.debug("Install completed")
-        except:
-            (_type, value, stacktrace) = sys.exc_info ()
-
-            # Detailed error message, in English so it can be Googled.
-            details = ("Error creating interface: '%s'" %
-                       (str(_type) + " " + str(value) + "\n" +
-                        traceback.format_exc (stacktrace)))
-            error = (_("Error creating interface: '%s'") % str(value))
-
-        if error:
-            asyncjob.set_error(error, details)
+        self.interface.conn = util.dup_conn(self.conn).vmm
+
+        self.interface.install(meter, create=activate)
+        logging.debug("Install completed")
 
     def show_help(self, ignore):
         # No help available yet.
@@ -396,35 +396,31 @@ class vmmCreatePool(vmmGObjectUI):
                               title=_("Creating storage pool..."),
                               text=_("Creating the storage pool may take a "
                                      "while..."))
-        progWin.run()
-        error, details = progWin.get_error()
+        error, details = progWin.run()
 
-        if error is not None:
-            self.err.show_err(error, details)
-
         self.topwin.set_sensitive(True)
         self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.TOP_LEFT_ARROW))
 
-        if not error:
+        if error:
+            error = _("Error creating pool: %s") % error
+            self.err.show_err(error, error + "\n" + details)
+        else:
             self.close()
 
-    def _async_pool_create(self, asyncjob):
+    def _async_pool_create(self, asyncjob, *args, **kwargs):
+        print args, kwargs
         newconn = None
-        try:
         # Open a seperate connection to install on since this is async
         newconn = util.dup_lib_conn(self._pool.conn)
         meter = vmmCreateMeter(asyncjob)
         self._pool.conn = newconn
 
         logging.debug("Starting backround pool creation.")
         build = self.window.get_widget("pool-build").get_active()
         poolobj = self._pool.install(create=True, meter=meter, build=build)
         poolobj.setAutostart(True)
         logging.debug("Pool creating succeeded.")
-        except Exception, e:
-            error = _("Error creating pool: %s") % str(e)
-            details = "".join(traceback.format_exc())
-            asyncjob.set_error(error, details)
 
     def page_changed(self, notebook_ignore, page_ignore, page_number):
         if page_number == PAGE_NAME:
@@ -203,38 +203,31 @@ class vmmCreateVolume(vmmGObjectUI):
         self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
 
         progWin = vmmAsyncJob(self._async_vol_create, [],
-                              title=_("Creating storage volume..."),
-                              text=_("Creating the storage volume may take a "
+                              _("Creating storage volume..."),
+                              _("Creating the storage volume may take a "
                                      "while..."))
-        progWin.run()
-        error, details = progWin.get_error()
+        error, details = progWin.run()
 
-        if error is not None:
-            self.show_err(error, details)
-
         self.topwin.set_sensitive(True)
         self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.TOP_LEFT_ARROW))
 
-        if not error:
+        if error:
+            error = _("Error creating vol: %s") % error
+            self.show_err(error, error + "\n" + details)
+        else:
             self.emit("vol-created")
             self.close()
 
     def _async_vol_create(self, asyncjob):
-        newconn = None
-        try:
-            newconn = util.dup_conn(self.conn).vmm
+        newconn = util.dup_conn(self.conn).vmm
 
         # Lookup different pool obj
         newpool = newconn.storagePoolLookupByName(self.parent_pool.get_name())
         self.vol.pool = newpool
 
         meter = vmmCreateMeter(asyncjob)
         logging.debug("Starting backround vol creation.")
         self.vol.install(meter=meter)
-        except Exception, e:
-            error = _("Error creating vol: %s") % str(e)
-            details = "".join(traceback.format_exc())
-            asyncjob.set_error(error, details)
 
     def validate(self):
         name = self.window.get_widget("vol-name").get_text()
@@ -132,8 +132,7 @@ class vmmDeleteDialog(vmmGObjectUI):
 
         progWin = vmmAsyncJob(self._async_delete, [devs],
                               title=title, text=text)
-        progWin.run()
-        error, details = progWin.get_error()
+        error, details = progWin.run()
 
         self.topwin.set_sensitive(True)
         self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.TOP_LEFT_ARROW))
@@ -145,11 +144,9 @@ class vmmDeleteDialog(vmmGObjectUI):
         self.conn.tick(noStatsUpdate=True)
 
 
-    def _async_delete(self, paths, asyncjob):
+    def _async_delete(self, asyncjob, paths):
         newconn = None
         storage_errors = []
-        error = None
-        details = None
 
         try:
             # Open a seperate connection to install on since this is async
@@ -200,6 +200,7 @@ class vmmDomainBase(vmmLibvirtObject):
         # attempt may result in a lookup failure. If device is present
         # in the active XML, assume all is good.
         if find_device(self._get_guest(), origdev):
+            logging.debug("Device in active config but not inactive config.")
             return
 
         raise RuntimeError(_("Could not find specified device in the "
@@ -104,9 +104,9 @@ def check_packagekit(errbox):
     progWin = vmmAsyncJob(_do_async_search,
                           [session, pk_control],
                           _("Searching for available hypervisors..."),
+                          _("Searching for available hypervisors..."),
                           run_main=False)
-    progWin.run()
-    error, ignore = progWin.get_error()
+    error, ignore = progWin.run()
     if error:
         return
 
@@ -146,7 +146,7 @@ def check_packagekit(errbox):
 
     return (True, LIBVIRT_DAEMON in do_install)
 
-def _do_async_search(session, pk_control, asyncjob):
+def _do_async_search(asyncjob, session, pk_control):
     found = []
     try:
         for name in PACKAGEKIT_PACKAGES:
@@ -827,15 +827,16 @@ class vmmEngine(vmmGObject):
         progWin = vmmAsyncJob(self._save_callback,
                               [vm, path],
                               _("Saving Virtual Machine"),
+                              _("Saving Virtual Machine"),
                               cancel_back=_cancel_back,
                               cancel_args=_cancel_args)
-        progWin.run()
-        error, details = progWin.get_error()
+        error, details = progWin.run()
 
         if error is not None:
-            src.err.show_err(_("Error saving domain: %s") % error, details)
+            error = _("Error saving domain: %s") % error
+            src.err.show_err(error, error + "\n" + details)
 
-    def _save_cancel(self, vm, asyncjob):
+    def _save_cancel(self, asyncjob, vm):
         logging.debug("Cancelling save job")
         if not vm:
             return
@@ -850,18 +851,17 @@ class vmmEngine(vmmGObject):
             asyncjob.job_canceled = True
             return
 
-    def _save_callback(self, vm, file_to_save, asyncjob):
+    def _save_callback(self, asyncjob, vm, file_to_save):
         try:
             conn = util.dup_conn(vm.connection)
             newvm = conn.get_vm(vm.get_uuid())
 
             newvm.save(file_to_save)
         except Exception, e:
-            if not (isinstance(e, libvirt.libvirtError) and
-                    asyncjob.job_canceled):
-                # If job is cancelled, we should not report the error
-                # to user.
-                asyncjob.set_error(str(e), "".join(traceback.format_exc()))
+            # If job is cancelled, don't report error to user.
+            if isinstance(e, libvirt.libvirtError) and asyncjob.job_canceled:
+                return
+            raise e
 
     def _do_restore_domain(self, src, uri):
         conn = self._lookup_connection(uri)
@@ -879,23 +879,19 @@ class vmmEngine(vmmGObject):
             return
 
         progWin = vmmAsyncJob(self._restore_saved_callback,
-                              [path, conn], _("Restoring Virtual Machine"))
-        progWin.run()
-        error, details = progWin.get_error()
+                              [path, conn],
+                              _("Restoring Virtual Machine"),
+                              _("Restoring Virtual Machine"))
+        error, details = progWin.run()
 
         if error is not None:
-            src.err.show_err(error, details,
-                             title=_("Error restoring domain"))
+            error = _("Error restoring domain: %s") % error
+            src.err.show_err(error, error + "\n" + details)
 
-    def _restore_saved_callback(self, file_to_load, conn, asyncjob):
-        try:
+    def _restore_saved_callback(self, asyncjob, file_to_load, conn):
+        ignore = asyncjob
         newconn = util.dup_conn(conn)
         newconn.restore(file_to_load)
-        except Exception, e:
-            err = (_("Error restoring domain '%s': %s") %
-                   (file_to_load, str(e)))
-            details = "".join(traceback.format_exc())
-            asyncjob.set_error(err, details)
 
     def _do_destroy_domain(self, src, uri, uuid):
         conn = self._lookup_connection(uri)
@@ -21,6 +21,7 @@
 import virtinst
 from virtinst import Interface
 
+from virtManager import util
 from virtManager.libvirtobject import vmmLibvirtObject
 
 class vmmInterface(vmmLibvirtObject):
@@ -118,7 +119,7 @@ class vmmInterface(vmmLibvirtObject):
 
             return doc.serialize()
 
-        self._redefine_xml(set_start_xml)
+        self._redefine(util.xml_parse_wrapper, set_start_xml)
 
 
     def get_slaves(self):
@@ -211,4 +212,19 @@ class vmmInterface(vmmLibvirtObject):
             ret = " %s\n" % ret
         return ret
 
+    def _redefine(self, xml_func, *args):
+        """
+        Helper function for altering a redefining VM xml
+
+        @param xml_func: Function to alter the running XML. Takes the
+                         original XML as its first argument.
+        @param args: Extra arguments to pass to xml_func
+        """
+        origxml = self._xml_to_redefine()
+        # Sanitize origxml to be similar to what we will get back
+        origxml = util.xml_parse_wrapper(origxml, lambda d, c: d.serialize())
+
+        newxml = xml_func(origxml, *args)
+        self._redefine_xml(newxml)
+
 vmmLibvirtObject.type_register(vmmInterface)
@@ -51,9 +51,6 @@ class vmmLibvirtObject(vmmGObject):
         self._xml = None
         self._is_xml_valid = False
 
-        # Cached XML that accumulates changes to define
-        self._xml_to_define = None
-
         # These should be set by the child classes if necessary
         self._inactive_xml_flags = 0
         self._active_xml_flags = 0
@@ -128,7 +125,7 @@ class vmmLibvirtObject(vmmGObject):
     # Internal API functions #
     ##########################
 
-    def __xml_to_redefine(self):
+    def _xml_to_redefine(self):
         return _sanitize_xml(self.get_xml(inactive=True))
 
     def _redefine_helper(self, origxml, newxml):
@@ -143,13 +140,15 @@ class vmmLibvirtObject(vmmGObject):
                           self.get_name(), diff)
 
             self._define(newxml)
+        else:
+            logging.debug("Redefine requested, but XML didn't change!")
 
         # Make sure we have latest XML
         self.refresh_xml(forcesignal=True)
         return
 
     def _redefine_xml(self, newxml):
-        origxml = self.__xml_to_redefine()
+        origxml = self._xml_to_redefine()
         return self._redefine_helper(origxml, newxml)
 
 vmmGObject.type_register(vmmLibvirtObject)
@@ -447,22 +447,21 @@ class vmmMigrateDialog(vmmGObjectUI):
         progWin = vmmAsyncJob(self._async_migrate,
                               [self.vm, destconn, uri, rate, live, secure,
                                max_downtime],
-                              title=_("Migrating VM '%s'" % self.vm.get_name()),
-                              text=(_("Migrating VM '%s' from %s to %s. "
+                              _("Migrating VM '%s'" % self.vm.get_name()),
+                              (_("Migrating VM '%s' from %s to %s. "
                                       "This may take awhile.") %
                                     (self.vm.get_name(), srchost, dsthost)),
                               cancel_back=_cancel_back,
                               cancel_args=_cancel_args)
-        progWin.run()
-        error, details = progWin.get_error()
+        error, details = progWin.run()
 
-        if error:
-            self.err.show_err(error, details)
-
         self.topwin.set_sensitive(True)
         self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.TOP_LEFT_ARROW))
 
-        if error is None:
+        if error:
+            error = _("Unable to migrate guest: %s") % error
+            self.err.show_err(error, error + "\n" + details)
+        else:
             self.conn.tick(noStatsUpdate=True)
             destconn.tick(noStatsUpdate=True)
             self.close()
@@ -482,7 +481,7 @@ class vmmMigrateDialog(vmmGObjectUI):
             logging.warning("Error setting migrate downtime: %s" % e)
             return False
 
-    def cancel_migration(self, vm, asyncjob):
+    def cancel_migration(self, asyncjob, vm):
         logging.debug("Cancelling migrate job")
         if not vm:
             return
@@ -497,43 +496,37 @@ class vmmMigrateDialog(vmmGObjectUI):
             asyncjob.job_canceled = True
             return
 
-    def _async_migrate(self, origvm, origdconn, migrate_uri, rate, live,
-                       secure, max_downtime, asyncjob):
-        errinfo = None
+    def _async_migrate(self, asyncjob,
+                       origvm, origdconn, migrate_uri, rate, live,
+                       secure, max_downtime):
         try:
-            try:
-                ignore = vmmCreateMeter(asyncjob)
+            ignore = vmmCreateMeter(asyncjob)
 
             srcconn = util.dup_conn(origvm.get_connection())
             dstconn = util.dup_conn(origdconn)
 
             vminst = srcconn.vmm.lookupByName(origvm.get_name())
             vm = vmmDomain(srcconn, vminst, vminst.UUID())
 
             logging.debug("Migrating vm=%s from %s to %s", vm.get_name(),
                           srcconn.get_uri(), dstconn.get_uri())
             timer = None
             if max_downtime != 0:
                 # 0 means that the spin box migrate-max-downtime does not
                 # be enabled.
                 current_thread = threading.currentThread()
                 timer = util.safe_timeout_add(100,
                                               self._async_set_max_downtime,
                                               vm, max_downtime,
                                               current_thread)
             vm.migrate(dstconn, migrate_uri, rate, live, secure)
             if timer:
                 gobject.source_remove(timer)
         except Exception, e:
-            if not (isinstance(e, libvirt.libvirtError) and
-                    asyncjob.job_canceled):
-                # If job is cancelled, we should not report the error
-                # to user.
-                errinfo = (str(e), ("Unable to migrate guest:\n %s" %
-                                    "".join(traceback.format_exc())))
-        finally:
-            if errinfo:
-                asyncjob.set_error(errinfo[0], errinfo[1])
+            # If job is cancelled, don't report error
+            if isinstance(e, libvirt.libvirtError) and asyncjob.job_canceled:
+                return
+            raise e
 
 
 vmmGObjectUI.type_register(vmmMigrateDialog)
@@ -53,7 +53,7 @@ class vmmStoragePool(vmmLibvirtObject):
 
     def set_active(self, state):
         self.active = state
-        self._update_xml()
+        self.refresh_xml()
 
     def is_active(self):
         return self.active
@@ -67,11 +67,11 @@ class vmmStoragePool(vmmLibvirtObject):
 
     def start(self):
         self.pool.create(0)
-        self._update_xml()
+        self.refresh_xml()
 
     def stop(self):
         self.pool.destroy()
-        self._update_xml()
+        self.refresh_xml()
 
     def delete(self, nodelete=True):
         if nodelete:
@@ -234,8 +234,10 @@ def browse_local(parent, dialog_name, conn, start_folder=None,
     return ret
 
 def dup_lib_conn(libconn):
-    vmmconn = _dup_all_conn(None, libconn)
-    return vmmconn.vmm
+    conn = _dup_all_conn(None, libconn)
+    if isinstance(conn, virtManager.connection.vmmConnection):
+        return conn.vmm
+    return conn
 
 def dup_conn(conn):
     return _dup_all_conn(conn, None)
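For reference, a self-contained toy (Python 2 style, to match the codebase above; none of this is virt-manager code) showing how an exception raised inside a callback becomes the (error, details) pair that run() ultimately hands back to the caller:

# Toy illustration of the wrapper pattern used in the asyncjob changes above.
import traceback

class ToyJob(object):
    def __init__(self):
        self._error_info = None

    def set_error(self, error, details):
        self._error_info = (error, details)

    def _get_error(self):
        if not self._error_info:
            return (None, None)
        return self._error_info

def cb_wrapper(callback, asyncjob, *args, **kwargs):
    # Same shape as the wrapper in the diff: run the callback, record any
    # exception on the job object instead of letting it escape the thread.
    try:
        callback(asyncjob, *args, **kwargs)
    except Exception, e:
        asyncjob.set_error(e, "".join(traceback.format_exc()))

def work(asyncjob, path):
    # Hypothetical callback that simply fails.
    raise RuntimeError("could not open %s" % path)

job = ToyJob()
cb_wrapper(work, job, "/tmp/missing")
error, details = job._get_error()
print error      # the RuntimeError raised by the callback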