Drop use of gtk threading

In general it complicates things while providing only minor simplifications
in some cases. I think it's better just to learn that anything invoked from
a thread that _might_ touch the UI should just be dispatched in an
idle callback. Any UI bits that we need immediately in a thread
can just use some creative trickery (like is done in connectauth) to
block in a safe manner.
This commit is contained in:
Cole Robinson
2012-02-10 14:07:51 -05:00
parent 239028e173
commit 0c507ac98b
18 changed files with 111 additions and 102 deletions

View File

@@ -299,7 +299,7 @@ def main():
icon_theme = gtk.icon_theme_get_default()
icon_theme.prepend_search_path(icon_dir)
gtk.gdk.threads_init()
gobject.threads_init()
import dbus
import dbus.glib
@@ -363,19 +363,15 @@ def main():
# or the dbus comms to existing instance has failed
# Finally start the app for real
gtk.gdk.threads_enter()
try:
show_engine(engine, options.show, options.uri, options.uuid,
options.no_conn_auto)
if options.profile != None:
import hotshot
prof = hotshot.Profile(options.profile)
prof.runcall(gtk.main)
prof.close()
else:
gtk.main()
finally:
gtk.gdk.threads_leave()
show_engine(engine, options.show, options.uri, options.uuid,
options.no_conn_auto)
if options.profile != None:
import hotshot
prof = hotshot.Profile(options.profile)
prof.runcall(gtk.main)
prof.close()
else:
gtk.main()
if __name__ == "__main__":
try:

View File

@@ -426,7 +426,7 @@ class vmmAddHardware(vmmGObjectUI):
label_widget = self.widget("phys-hd-label")
label_widget.set_markup("")
if not self.host_storage_timer:
self.host_storage_timer = self.safe_timeout_add(3 * 1000,
self.host_storage_timer = self.timeout_add(3 * 1000,
uihelpers.host_space_tick,
self.conn,
label_widget)

View File

@@ -130,7 +130,7 @@ def _simple_async(callback, args, title, text, parent, errorintro,
def idle_wrapper(fn):
def wrapped(self, *args, **kwargs):
return self.safe_idle_add(fn, self, *args, **kwargs)
return self.idle_add(fn, self, *args, **kwargs)
return wrapped
# Displays a progress bar while executing the "callback" method.
@@ -317,7 +317,7 @@ class vmmAsyncJob(vmmGObjectUI):
if not self.is_pulsing or not self.show_progress:
return True
self.safe_idle_add(self.pbar.pulse)
self.idle_add(self.pbar.pulse)
return True
@idle_wrapper

View File

@@ -28,13 +28,6 @@ from virtManager import util
running_config, gobject, GObject, gtk = virtManager.guidiff.get_imports()
def _safe_wrapper(func, *args):
gtk.gdk.threads_enter()
try:
return func(*args)
finally:
gtk.gdk.threads_leave()
class vmmGObject(GObject):
@staticmethod
@@ -191,23 +184,23 @@ class vmmGObject(GObject):
self.emit(_s, *_a)
return False
self.safe_idle_add(emitwrap, signal, *args)
self.idle_add(emitwrap, signal, *args)
def safe_idle_add(self, func, *args):
def idle_add(self, func, *args):
"""
Make sure idle functions are run thread safe
"""
if not hasattr(gobject, "idle_add"):
return func(*args)
return gobject.idle_add(_safe_wrapper, func, *args)
return gobject.idle_add(func, *args)
def safe_timeout_add(self, timeout, func, *args):
def timeout_add(self, timeout, func, *args):
"""
Make sure timeout functions are run thread safe
"""
if not hasattr(gobject, "timeout_add"):
return
return gobject.timeout_add(timeout, _safe_wrapper, func, *args)
return gobject.timeout_add(timeout, func, *args)
def emit(self, signal_name, *args):
if hasattr(GObject, "emit"):

View File

@@ -807,8 +807,7 @@ class vmmCloneVM(vmmGObjectUI):
if error is not None:
error = (_("Error creating virtual machine clone '%s': %s") %
(self.clone_design.clone_name, error))
self.err.show_err(error,
details=details)
self.err.show_err(error, details=details)
else:
self.close()
self.conn.tick(noStatsUpdate=True)

View File

@@ -20,6 +20,7 @@
import logging
import os
import time
import dbus
import libvirt
@@ -92,13 +93,24 @@ def creds_dialog(creds):
"""
Thread safe wrapper for libvirt openAuth user/pass callback
"""
try:
import gtk
import gobject
gtk.gdk.threads_enter()
return creds_dialog_main(creds)
finally:
gtk.gdk.threads_leave()
retipc = []
def wrapper(fn, creds):
try:
ret = fn(creds)
except:
logging.exception("Error from creds dialog")
ret = -1
retipc.append(ret)
gobject.idle_add(wrapper, creds_dialog_main, creds)
while not retipc:
time.sleep(.1)
return retipc[0]
def creds_dialog_main(creds):
@@ -163,11 +175,12 @@ def creds_dialog_main(creds):
for cred in creds:
cred[4] = entry[row].get_text()
row = row + 1
dialog.destroy()
return 0
ret = 0
else:
dialog.destroy()
return -1
ret = -1
dialog.destroy()
return ret
def acquire_tgt():

View File

@@ -1068,7 +1068,7 @@ class vmmConnection(vmmGObject):
# We want to kill off this thread asap, so schedule an
# idle event to inform the UI of result
logging.debug("Background open thread complete, scheduling notify")
self.safe_idle_add(self._open_notify)
self.idle_add(self._open_notify)
self.connectThread = None
def _open_notify(self):
@@ -1402,7 +1402,7 @@ class vmmConnection(vmmGObject):
for name in newNodedevs:
self.emit("nodedev-added", name)
self.safe_idle_add(tick_send_signals)
self.idle_add(tick_send_signals)
# Finally, we sample each domain
now = time.time()

View File

@@ -1013,7 +1013,7 @@ class vmmConsolePages(vmmGObjectUI):
logging.error("Too many connection failures, not retrying again")
return
self.safe_timeout_add(self.viewerRetryDelay, self.try_login)
self.timeout_add(self.viewerRetryDelay, self.try_login)
if self.viewerRetryDelay < 2000:
self.viewerRetryDelay = self.viewerRetryDelay * 2
@@ -1154,7 +1154,7 @@ class vmmConsolePages(vmmGObjectUI):
def unset_cb(src):
src.queue_resize_no_redraw()
self.safe_idle_add(restore_scroll, src)
self.idle_add(restore_scroll, src)
return False
def request_cb(src, req):
@@ -1164,7 +1164,7 @@ class vmmConsolePages(vmmGObjectUI):
src.disconnect(signal_id)
self.safe_idle_add(unset_cb, widget)
self.idle_add(unset_cb, widget)
return False
# Disable scroll bars while we resize, since resizing to the VM's

View File

@@ -417,7 +417,7 @@ class vmmCreate(vmmGObjectUI):
label_widget = self.widget("phys-hd-label")
label_widget.set_markup("")
if not self.host_storage_timer:
self.host_storage_timer = self.safe_timeout_add(3 * 1000,
self.host_storage_timer = self.timeout_add(3 * 1000,
uihelpers.host_space_tick,
self.conn,
label_widget)
@@ -1947,8 +1947,8 @@ class vmmCreate(vmmGObjectUI):
else:
# Register a status listener, which will restart the
# guest after the install has finished
vm.connect_opt_out("status-changed",
self.check_install_status, guest)
self.idle_add(vm.connect_opt_out, "status-changed",
self.check_install_status, guest)
def check_install_status(self, vm, ignore1, ignore2, virtinst_guest=None):
@@ -2057,7 +2057,7 @@ class vmmCreate(vmmGObjectUI):
detect_str = base + ("." * ((idx % 3) + 1))
self.set_distro_labels(detect_str, detect_str)
self.safe_timeout_add(500, self.check_detection,
self.timeout_add(500, self.check_detection,
idx + 1, forward)
return
@@ -2072,7 +2072,7 @@ class vmmCreate(vmmGObjectUI):
logging.debug("Finished OS detection.")
self.set_distro_selection(*results)
if forward:
self.safe_idle_add(self.forward, ())
self.idle_add(self.forward, ())
def start_detection(self, forward):
if self.detecting:

View File

@@ -471,8 +471,9 @@ class vmmCreatePool(vmmGObjectUI):
def finish(self):
self.topwin.set_sensitive(False)
self.topwin.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.WATCH))
build = self.widget("pool-build").get_active()
progWin = vmmAsyncJob(self._async_pool_create, [],
progWin = vmmAsyncJob(self._async_pool_create, [build],
_("Creating storage pool..."),
_("Creating the storage pool may take a "
"while..."),
@@ -489,7 +490,7 @@ class vmmCreatePool(vmmGObjectUI):
else:
self.close()
def _async_pool_create(self, asyncjob):
def _async_pool_create(self, asyncjob, build):
newconn = None
# Open a seperate connection to install on since this is async
@@ -498,7 +499,6 @@ class vmmCreatePool(vmmGObjectUI):
self._pool.conn = newconn
logging.debug("Starting backround pool creation.")
build = self.widget("pool-build").get_active()
poolobj = self._pool.install(create=True, meter=meter, build=build)
poolobj.setAutostart(True)
logging.debug("Pool creation succeeded")

View File

@@ -248,6 +248,7 @@ class vmmCreateVolume(vmmGObjectUI):
meter = asyncjob.get_meter()
logging.debug("Starting backround vol creation.")
self.vol.install(meter=meter)
logging.debug("vol creation complete.")
def validate(self):
name = self.widget("vol-name").get_text()

View File

@@ -266,8 +266,6 @@ class vmmDomain(vmmLibvirtObject):
# If manual shutdown or destroy specified, make sure we don't continue
# install process
def set_install_abort(self, val):
self._install_abort = bool(val)
def get_install_abort(self):
return bool(self._install_abort)
@@ -1054,12 +1052,16 @@ class vmmDomain(vmmLibvirtObject):
# Domain lifecycle methods #
############################
# All these methods are usually run asynchronously from threads, so
# let's be extra careful and have anything which might touch UI
# or gobject props invoked in an idle callback
def _unregister_reboot_listener(self):
if self.reboot_listener == None:
return
try:
self.disconnect(self.reboot_listener)
self.idle_add(self.disconnect, self.reboot_listener)
self.reboot_listener = None
except:
pass
@@ -1093,30 +1095,38 @@ class vmmDomain(vmmLibvirtObject):
# Request a shutdown
self.shutdown()
self.reboot_listener = self.connect_opt_out("status-changed",
def add_reboot():
self.reboot_listener = self.connect_opt_out("status-changed",
reboot_listener, self)
self.idle_add(add_reboot)
def shutdown(self):
self.set_install_abort(True)
self._install_abort = True
self._unregister_reboot_listener()
self._backend.shutdown()
self.force_update_status()
self.idle_add(self.force_update_status)
def reboot(self):
self.set_install_abort(True)
self._install_abort = True
self._backend.reboot(0)
self.force_update_status()
self.idle_add(self.force_update_status)
def destroy(self):
self._install_abort = True
self._unregister_reboot_listener()
self._backend.destroy()
self.idle_add(self.force_update_status)
def startup(self):
if self.get_cloning():
raise RuntimeError(_("Cannot start guest while cloning "
"operation in progress"))
self._backend.create()
self.force_update_status()
self.idle_add(self.force_update_status)
def suspend(self):
self._backend.suspend()
self.force_update_status()
self.idle_add(self.force_update_status)
def delete(self):
if self.hasSavedImage():
@@ -1132,7 +1142,7 @@ class vmmDomain(vmmLibvirtObject):
"operation in progress"))
self._backend.resume()
self.force_update_status()
self.idle_add(self.force_update_status)
def hasSavedImage(self):
if not self.managedsave_supported:
@@ -1140,7 +1150,7 @@ class vmmDomain(vmmLibvirtObject):
return self._backend.hasManagedSaveImage(0)
def save(self, filename=None, meter=None):
self.set_install_abort(True)
self._install_abort = True
if meter:
start_job_progress_thread(self, meter, _("Saving domain to disk"))
@@ -1150,15 +1160,8 @@ class vmmDomain(vmmLibvirtObject):
else:
self._backend.managedSave(0)
self.force_update_status()
self.idle_add(self.force_update_status)
def destroy(self):
self.set_install_abort(True)
self._unregister_reboot_listener()
self._backend.destroy()
self.force_update_status()
# Migrate methods
def support_downtime(self):
return support.check_domain_support(self._backend,
@@ -1169,7 +1172,7 @@ class vmmDomain(vmmLibvirtObject):
def migrate(self, destconn, interface=None, rate=0,
live=False, secure=False, meter=None):
self.set_install_abort(True)
self._install_abort = True
newname = None
@@ -1181,8 +1184,6 @@ class vmmDomain(vmmLibvirtObject):
flags |= libvirt.VIR_MIGRATE_PEER2PEER
flags |= libvirt.VIR_MIGRATE_TUNNELLED
newxml = self.get_xml(inactive=True)
logging.debug("Migrating: conn=%s flags=%s dname=%s uri=%s rate=%s",
destconn.vmm, flags, newname, interface, rate)
@@ -1190,7 +1191,11 @@ class vmmDomain(vmmLibvirtObject):
start_job_progress_thread(self, meter, _("Migrating domain"))
self._backend.migrate(destconn.vmm, flags, newname, interface, rate)
destconn.define_domain(newxml)
def define_cb():
newxml = self.get_xml(inactive=True)
destconn.define_domain(newxml)
self.idle_add(define_cb)
###################

View File

@@ -412,7 +412,7 @@ class vmmEngine(vmmGObject):
gobject.source_remove(self.timer)
self.timer = None
# No need to use 'safe_timeout_add', the tick should be
# No need to use 'timeout_add', the tick should be
# manually made thread safe
self.timer = gobject.timeout_add(interval, self.tick)
@@ -456,7 +456,7 @@ class vmmEngine(vmmGObject):
self.err.show_err(_("Error polling connection '%s': %s") %
(conn.get_uri(), e))
self.safe_idle_add(conn.close)
self.idle_add(conn.close)
return 1
@@ -928,8 +928,7 @@ class vmmEngine(vmmGObject):
if error is not None:
error = _("Error saving domain: %s") % error
src.err.show_err(error,
details=details)
src.err.show_err(error, details=details)
def _save_cancel(self, asyncjob, vm):
logging.debug("Cancelling save job")
@@ -1068,8 +1067,8 @@ class vmmEngine(vmmGObject):
except Exception, reboot_err:
no_support = virtinst.support.is_error_nosupport(reboot_err)
if not no_support:
src.err.show_err(_("Error rebooting domain: %s" %
str(reboot_err)))
raise RuntimeError(_("Error rebooting domain: %s" %
str(reboot_err)))
if not no_support:
return
@@ -1082,8 +1081,8 @@ class vmmEngine(vmmGObject):
logging.exception("Could not fake a reboot")
# Raise the original error message
src.err.show_err(_("Error rebooting domain: %s" %
str(reboot_err)))
raise RuntimeError(_("Error rebooting domain: %s" %
str(reboot_err)))
vmmAsyncJob.simple_async_noshow(reboot_cb, [], src, "")

View File

@@ -653,7 +653,7 @@ class vmmHost(vmmGObjectUI):
def cb():
try:
pool.refresh()
self.refresh_current_pool()
self.idle_add(self.refresh_current_pool)
finally:
self._in_refresh = False
@@ -673,8 +673,10 @@ class vmmHost(vmmGObjectUI):
def cb():
vol.delete()
self.refresh_current_pool()
self.populate_storage_volumes()
def idlecb():
self.refresh_current_pool()
self.populate_storage_volumes()
self.idle_add(idlecb)
logging.debug("Deleting volume '%s'", vol.get_name())
vmmAsyncJob.simple_async_noshow(cb, [], self,

View File

@@ -73,11 +73,11 @@ class vmmInterface(vmmLibvirtObject):
def start(self):
self.interface.create(0)
self.refresh_xml()
self.idle_add(self.refresh_xml)
def stop(self):
self.interface.destroy(0)
self.refresh_xml()
self.idle_add(self.refresh_xml)
def delete(self):
self.interface.undefine()

View File

@@ -549,13 +549,11 @@ class vmmMigrateDialog(vmmGObjectUI):
# 0 means that the spin box migrate-max-downtime does not
# be enabled.
current_thread = threading.currentThread()
timer = self.safe_timeout_add(100,
self._async_set_max_downtime,
vm, max_downtime,
current_thread)
timer = self.timeout_add(100, self._async_set_max_downtime,
vm, max_downtime, current_thread)
vm.migrate(dstconn, migrate_uri, rate, live, secure, meter=meter)
if timer:
gobject.source_remove(timer)
self.idle_add(gobject.source_remove, timer)
vmmGObjectUI.type_register(vmmMigrateDialog)

View File

@@ -161,7 +161,7 @@ class LibvirtConsoleConnection(ConsoleConnection):
queued_text = bool(self.streamToTerminal)
self.streamToTerminal += got
if not queued_text:
self.safe_idle_add(self.display_data, terminal)
self.idle_add(self.display_data, terminal)
if (events & libvirt.VIR_EVENT_HANDLE_WRITABLE and
self.terminalToStream):

View File

@@ -62,11 +62,11 @@ class vmmStoragePool(vmmLibvirtObject):
def start(self):
self.pool.create(0)
self.refresh_xml()
self.idle_add(self.refresh_xml)
def stop(self):
self.pool.destroy()
self.refresh_xml()
self.idle_add(self.refresh_xml)
def delete(self, nodelete=True):
if nodelete:
@@ -112,10 +112,13 @@ class vmmStoragePool(vmmLibvirtObject):
if not self.active:
return
def cb():
self.refresh_xml()
self.update_volumes(refresh=True)
self.emit("refreshed")
self.pool.refresh(0)
self.refresh_xml()
self.update_volumes(refresh=True)
self.emit("refreshed")
self.idle_add(cb)
def update_volumes(self, refresh=False):
if not self.is_active():