connection: Do non-VM polling on demand
The goal here is to reduce the amount of tick() polling we do by default. For things like pools, networks, and interfaces, constant polling is not very helpful and causes CPU churn and slowness on remote connections. Switch to a more on-demand style: pages that want fresh information about these objects now request a priority tick that refreshes only the info they need. This isn't perfect, but neither was the previous approach in the face of things like XML updates behind our back. The real solution is libvirt event support across the board.
parent 39a819c34b
commit 5e9e444dec
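
Before the diff, a minimal standalone sketch of the pattern described above, with hypothetical names rather than virt-manager's real classes: callers request a priority tick that names exactly which object types to poll, and tick() polls only what was requested.

# Hypothetical sketch of the on-demand polling flow; in virt-manager the
# request travels via a "priority-tick" signal through vmmEngine's queue.

class Conn(object):
    def schedule_priority_tick(self, **kwargs):
        # kwargs mirror tick()'s signature; stats updates are opt-in
        if "stats_update" not in kwargs:
            kwargs["stats_update"] = False
        self.tick(**kwargs)   # the real code queues this instead of calling it

    def tick(self, stats_update,
             pollvm=False, pollnet=False, pollpool=False,
             polliface=False, pollnodedev=False, pollmedia=False):
        # Each flag gates one class of libvirt list calls
        polled = [name for name, flag in (
            ("vm", pollvm), ("net", pollnet), ("pool", pollpool),
            ("iface", polliface), ("nodedev", pollnodedev),
            ("media", pollmedia)) if flag]
        print("tick: stats_update=%s polled=%s" % (stats_update, polled))

conn = Conn()
conn.schedule_priority_tick(pollnet=True)   # e.g. host dialog, network tab
conn.tick(stats_update=True, pollvm=True)   # what the periodic timer asks for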
@@ -106,6 +106,7 @@
         <property name="visible">True</property>
         <property name="can_focus">True</property>
         <property name="border_width">6</property>
+        <signal name="switch-page" handler="on_host_page_switch" swapped="no"/>
         <child>
           <object class="GtkVBox" id="vbox2">
             <property name="visible">True</property>
@@ -168,6 +168,9 @@ class vmmAddHardware(vmmGObjectUI):
         self.reset_state()
         self.topwin.set_transient_for(parent)
         self.topwin.present()
+        self.conn.schedule_priority_tick(pollnet=True,
+                                         pollpool=True, polliface=True,
+                                         pollnodedev=True, pollmedia=True)
 
     def close(self, ignore1=None, ignore2=None):
         logging.debug("Closing addhw")
@@ -71,6 +71,7 @@ class vmmChooseCD(vmmGObjectUI):
         self.reset_state()
         self.topwin.set_transient_for(parent)
         self.topwin.present()
+        self.conn.schedule_priority_tick(pollnodedev=True, pollmedia=True)
 
     def _cleanup(self):
         self.vm = None
@@ -816,7 +816,7 @@ class vmmCloneVM(vmmGObjectUI):
             self.err.show_err(error, details=details)
         else:
             self.close()
-        self.conn.schedule_priority_tick()
+        self.conn.schedule_priority_tick(pollvm=True)
 
     def _async_clone(self, asyncjob):
         try:
@@ -930,7 +930,10 @@ class vmmConnection(vmmGObject):
         if self.state == self.STATE_ACTIVE:
             logging.debug("%s capabilities:\n%s",
                           self.get_uri(), self.caps.xml)
-            self.schedule_priority_tick()
+            self.schedule_priority_tick(stats_update=True,
+                                        pollvm=True, pollnet=True,
+                                        pollpool=True, polliface=True,
+                                        pollnodedev=True, pollmedia=True)
 
         if self.state == self.STATE_DISCONNECTED:
             if self.connectError:
@@ -1000,8 +1003,11 @@ class vmmConnection(vmmGObject):
 
         return (origlist, new, current)
 
-    def _update_nets(self):
+    def _update_nets(self, dopoll):
         orig = self.nets.copy()
+        if not dopoll:
+            return {}, {}, orig
+
         name = "network"
         active_list = self._backend.listNetworks
         inactive_list = self._backend.listDefinedNetworks
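
The reworked _update_* helpers in this file all share one contract: return a (gone, new, current) triple, and when the poll flag is off, report nothing changed. A standalone sketch of that contract, with a hypothetical helper standing in for the libvirt list calls:

def update_objects(dopoll, cached, list_names):
    # Mirrors the dopoll early-return above: no removals, no additions,
    # and the cached objects passed through untouched.
    orig = cached.copy()
    if not dopoll:
        return {}, {}, orig

    current, new = {}, {}
    for name in list_names():        # stand-in for listNetworks() etc.
        if name in orig:
            current[name] = orig.pop(name)
        else:
            new[name] = current.setdefault(name, object())
    return orig, new, current        # leftovers in orig are "gone"

cached = {"default": "net-obj"}
gone, new, current = update_objects(True, cached,
                                    lambda: ["default", "isolated"])
print(sorted(new))    # ['isolated'] -> tick() emits "net-added"
print(sorted(gone))   # []           -> nothing removed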
@@ -1013,8 +1019,11 @@ class vmmConnection(vmmGObject):
             active_list, inactive_list,
             lookup_func, build_func)
 
-    def _update_pools(self):
+    def _update_pools(self, dopoll):
         orig = self.pools.copy()
+        if not dopoll:
+            return {}, {}, orig
+
         name = "pool"
         active_list = self._backend.listStoragePools
         inactive_list = self._backend.listDefinedStoragePools
@@ -1026,8 +1035,11 @@ class vmmConnection(vmmGObject):
             active_list, inactive_list,
             lookup_func, build_func)
 
-    def _update_interfaces(self):
+    def _update_interfaces(self, dopoll):
         orig = self.interfaces.copy()
+        if not dopoll:
+            return {}, {}, orig
+
         name = "interface"
         active_list = self._backend.listInterfaces
         inactive_list = self._backend.listDefinedInterfaces
@@ -1040,8 +1052,11 @@ class vmmConnection(vmmGObject):
             lookup_func, build_func)
 
 
-    def _update_nodedevs(self):
+    def _update_nodedevs(self, dopoll):
         orig = self.nodedevs.copy()
+        if not dopoll:
+            return {}, {}, orig
+
         name = "nodedev"
         active_list = lambda: self._backend.listDevices(None, 0)
         inactive_list = lambda: []
@@ -1053,7 +1068,7 @@ class vmmConnection(vmmGObject):
             active_list, inactive_list,
             lookup_func, build_func)
 
-    def _update_vms(self):
+    def _update_vms(self, dopoll):
         # We can't easily use _poll_helper here because the domain API
         # doesn't always return names like other objects, it returns
         # IDs for active VMs
@@ -1066,6 +1081,8 @@ class vmmConnection(vmmGObject):
         origlist = self.vms.copy()
         current = {}
         new = {}
+        if not dopoll:
+            return current, new, origlist
 
         # Build list of previous vms with proper id/name mappings
         for uuid in origlist:
@@ -1138,21 +1155,32 @@ class vmmConnection(vmmGObject):
             ignore = obj
             self.emit(signal, key)
 
-    def schedule_priority_tick(self, obj=None):
-        self.emit("priority-tick", obj or self)
+    def schedule_priority_tick(self, **kwargs):
+        # args/kwargs are what is passed to def tick()
+        if "stats_update" not in kwargs:
+            kwargs["stats_update"] = False
+        self.idle_emit("priority-tick", kwargs)
 
-    def tick(self, stats_update):
+    def tick(self, stats_update,
+             pollvm=False, pollnet=False,
+             pollpool=False, polliface=False,
+             pollnodedev=False, pollmedia=False):
         """ main update function: polls for new objects, updates stats, ..."""
         if self.state != self.STATE_ACTIVE:
             return
 
+        if not pollvm:
+            stats_update = False
+
         self.hostinfo = self._backend.getInfo()
 
-        (goneNets, newNets, nets) = self._update_nets()
-        (gonePools, newPools, pools) = self._update_pools()
-        (goneInterfaces, newInterfaces, interfaces) = self._update_interfaces()
-        (goneNodedevs, newNodedevs, nodedevs) = self._update_nodedevs()
-        (goneVMs, newVMs, vms) = self._update_vms()
+        (goneNets, newNets, nets) = self._update_nets(pollnet)
+        (gonePools, newPools, pools) = self._update_pools(pollpool)
+        (goneInterfaces,
+         newInterfaces, interfaces) = self._update_interfaces(polliface)
+        (goneNodedevs,
+         newNodedevs, nodedevs) = self._update_nodedevs(pollnodedev)
+        (goneVMs, newVMs, vms) = self._update_vms(pollvm)
 
         def tick_send_signals():
             """
@@ -1178,17 +1206,17 @@ class vmmConnection(vmmGObject):
             self._init_mediadev()
 
             # Update VM states
-            for uuid in goneVMs:
+            for uuid, obj in goneVMs.items():
                 self.emit("vm-removed", uuid)
-                goneVMs[uuid].cleanup()
+                obj.cleanup()
             for uuid, obj in newVMs.items():
                 ignore = obj
                 self.emit("vm-added", uuid)
 
             # Update virtual network states
-            for uuid in goneNets:
+            for uuid, obj in goneNets.items():
                 self.emit("net-removed", uuid)
-                goneNets[uuid].cleanup()
+                obj.cleanup()
             for uuid, obj in newNets.items():
                 obj.connect("started", self._obj_signal_proxy,
                             "net-started", uuid)
@@ -1197,9 +1225,9 @@ class vmmConnection(vmmGObject):
                 self.emit("net-added", uuid)
 
             # Update storage pool states
-            for uuid in gonePools:
+            for uuid, obj in gonePools.items():
                 self.emit("pool-removed", uuid)
-                gonePools[uuid].cleanup()
+                obj.cleanup()
             for uuid, obj in newPools.items():
                 obj.connect("started", self._obj_signal_proxy,
                             "pool-started", uuid)
@@ -1208,9 +1236,9 @@ class vmmConnection(vmmGObject):
                 self.emit("pool-added", uuid)
 
             # Update interface states
-            for name in goneInterfaces:
+            for name, obj in goneInterfaces.items():
                 self.emit("interface-removed", name)
-                goneInterfaces[name].cleanup()
+                obj.cleanup()
             for name, obj in newInterfaces.items():
                 obj.connect("started", self._obj_signal_proxy,
                             "interface-started", name)
@@ -1235,15 +1263,22 @@ class vmmConnection(vmmGObject):
         if stats_update:
             updateVMs = vms
 
-        for key in vms:
-            if key in updateVMs:
-                add_to_ticklist([vms[key]], (True,))
-            else:
-                add_to_ticklist([vms[key]], (stats_update,))
-        add_to_ticklist(nets.values())
-        add_to_ticklist(pools.values())
-        add_to_ticklist(interfaces.values())
-        add_to_ticklist(self.mediadevs.values())
+        if pollvm:
+            for key in vms:
+                if key in updateVMs:
+                    add_to_ticklist([vms[key]], (True,))
+                else:
+                    add_to_ticklist([vms[key]], (stats_update,))
+        if pollnet:
+            add_to_ticklist(nets.values())
+        if pollpool:
+            add_to_ticklist(pools.values())
+        if polliface:
+            add_to_ticklist(interfaces.values())
+        if pollnodedev:
+            add_to_ticklist(nodedevs.values())
+        if pollmedia:
+            add_to_ticklist(self.mediadevs.values())
 
         for obj, args in ticklist:
             try:
@@ -416,6 +416,10 @@ class vmmCreate(vmmGObjectUI):
 
     def set_conn_state(self):
         # Update all state that has some dependency on the current connection
+        self.conn.schedule_priority_tick(pollnet=True,
+                                         pollpool=True, polliface=True,
+                                         pollnodedev=True, pollmedia=True)
+
         self.widget("create-forward").set_sensitive(True)
 
         if self.conn.caps.no_install_options():
@@ -1899,7 +1903,7 @@ class vmmCreate(vmmGObjectUI):
         # Make sure we pick up the domain object
 
         # Wait for VM to show up
-        self.conn.schedule_priority_tick()
+        self.conn.schedule_priority_tick(pollvm=True)
         count = 0
         while (guest.uuid not in self.conn.vms) and (count < 100):
             count += 1
@@ -1124,7 +1124,7 @@ class vmmCreateInterface(vmmGObjectUI):
             self.err.show_err(error,
                               details=details)
         else:
-            self.conn.schedule_priority_tick()
+            self.conn.schedule_priority_tick(polliface=True)
             self.close()
 
     def do_install(self, asyncjob, activate):
@@ -1013,7 +1013,7 @@ class vmmCreateNetwork(vmmGObjectUI):
             self.err.show_err(_("Error creating virtual network: %s" % str(e)))
             return
 
-        self.conn.schedule_priority_tick()
+        self.conn.schedule_priority_tick(pollnet=True)
         self.close()
 
     def validate_name(self):
@@ -452,7 +452,7 @@ class vmmCreatePool(vmmGObjectUI):
             self.err.show_err(error,
                               details=details)
         else:
-            self.conn.schedule_priority_tick()
+            self.conn.schedule_priority_tick(pollpool=True)
             self.close()
 
     def _async_pool_create(self, asyncjob, build):
@@ -245,6 +245,7 @@ class vmmCreateVolume(vmmGObjectUI):
             self.show_err(error,
                           details=details)
         else:
+            # vol-created will refresh the parent pool
             self.emit("vol-created")
             self.close()
 
@@ -162,7 +162,7 @@ class vmmDeleteDialog(vmmGObjectUI):
         if error is not None:
             self.err.show_err(error, details=details)
 
-        self.conn.schedule_priority_tick()
+        self.conn.schedule_priority_tick(pollvm=True)
         self.close()
 
     def _async_delete(self, asyncjob, paths):
@@ -1300,6 +1300,7 @@ class vmmDomain(vmmLibvirtObject):
             newxml = self.get_xml(inactive=True)
             destconn.define_domain(newxml)
         self.idle_add(define_cb)
+        # Don't schedule any conn update, migrate dialog handles it for us
 
 
     ###################
@@ -269,7 +269,7 @@ class vmmEngine(vmmGObject):
 
         self.timer = self.timeout_add(interval, self.tick)
 
-    def _add_obj_to_tick_queue(self, obj, isprio):
+    def _add_obj_to_tick_queue(self, obj, isprio, **kwargs):
         if self._tick_queue.full():
             if not self._tick_thread_slow:
                 logging.debug("Tick is slow, not running at requested rate.")
@@ -278,29 +278,29 @@ class vmmEngine(vmmGObject):
 
         self._tick_counter += 1
         self._tick_queue.put((isprio and PRIO_HIGH or PRIO_LOW,
-                              self._tick_counter, obj))
+                              self._tick_counter,
+                              obj, kwargs))
 
-    def _schedule_priority_tick(self, conn, obj):
-        ignore = conn
-        self._add_obj_to_tick_queue(obj, True)
+    def _schedule_priority_tick(self, conn, kwargs):
+        self._add_obj_to_tick_queue(conn, True, **kwargs)
 
     def tick(self):
         for uri in self.conns.keys():
             conn = self.conns[uri]["conn"]
-            self._add_obj_to_tick_queue(conn, False)
+            self._add_obj_to_tick_queue(conn, False,
+                                        stats_update=True, pollvm=True)
         return 1
 
     def _handle_tick_queue(self):
         while True:
-            prio, ignore, obj = self._tick_queue.get()
-            stats_update = prio != PRIO_HIGH
-            self._tick_single_conn(obj, stats_update)
+            ignore1, ignore2, obj, kwargs = self._tick_queue.get()
+            self._tick_single_conn(obj, kwargs)
             self._tick_queue.task_done()
         return 1
 
-    def _tick_single_conn(self, conn, stats_update):
+    def _tick_single_conn(self, conn, kwargs):
         try:
-            conn.tick(stats_update=stats_update)
+            conn.tick(**kwargs)
         except KeyboardInterrupt:
             raise
         except libvirt.libvirtError, e:
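
The engine changes above rely on how the queue orders its tuples: priority ticks (PRIO_HIGH) sort ahead of timer ticks, and the counter keeps FIFO order within each priority class. A simplified standalone sketch, with the tuple layout borrowed from the diff:

import queue   # named Queue on the python2 this code targets

PRIO_HIGH, PRIO_LOW = 1, 2

tick_queue = queue.PriorityQueue()
counter = 0

def queue_tick(conn, isprio, **kwargs):
    # (priority, counter, conn, kwargs): tuples compare element-wise,
    # and the unique counter means the kwargs dicts are never compared.
    global counter
    counter += 1
    tick_queue.put((isprio and PRIO_HIGH or PRIO_LOW,
                    counter, conn, kwargs))

queue_tick("qemu:///system", False, stats_update=True, pollvm=True)
queue_tick("qemu:///system", True, pollnet=True)   # on-demand request
while not tick_queue.empty():
    prio, n, conn, kwargs = tick_queue.get()
    print(conn, kwargs)   # the pollnet request is handled first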
@@ -109,6 +109,7 @@ class vmmHost(vmmGObjectUI):
             "on_menu_file_quit_activate" : self.exit_app,
             "on_menu_file_close_activate": self.close,
             "on_vmm_host_delete_event": self.close,
+            "on_host_page_switch": self.page_changed,
 
             "on_menu_restore_saved_activate": self.restore_domain,
 
@@ -362,6 +363,16 @@ class vmmHost(vmmGObjectUI):
         auto = self.conn.get_autoconnect()
         self.widget("config-autoconnect").set_active(auto)
 
+    def page_changed(self, src, child, pagenum):
+        ignore = src
+        ignore = child
+        if pagenum == 1:
+            self.conn.schedule_priority_tick(pollnet=True)
+        elif pagenum == 2:
+            self.conn.schedule_priority_tick(pollpool=True)
+        elif pagenum == 3:
+            self.conn.schedule_priority_tick(polliface=True)
+
     def refresh_resources(self, ignore=None):
         vm_memory = self.conn.pretty_stats_memory()
         host_memory = self.conn.pretty_host_memory_size()
@@ -495,10 +506,12 @@ class vmmHost(vmmGObjectUI):
         uilist = self.widget("net-list")
         sel = uilist.get_selection()
         active = sel.get_selected()
+        net = self.conn.get_net(uuid)
+        net.tick()
 
         for row in uilist.get_model():
             if row[0] == uuid:
-                row[4] = self.conn.get_net(uuid).is_active()
+                row[4] = net.is_active()
 
         if active[1] is not None:
             currname = active[0].get_value(active[1], 0)
@@ -864,6 +877,7 @@ class vmmHost(vmmGObjectUI):
 
     def populate_pool_state(self, uuid):
         pool = self.conn.get_pool(uuid)
+        pool.tick()
         auto = pool.get_autostart()
         active = pool.is_active()
 
@@ -1209,10 +1223,12 @@ class vmmHost(vmmGObjectUI):
         iface_list = self.widget("interface-list")
         sel = iface_list.get_selection()
         active = sel.get_selected()
+        iface = self.conn.get_interface(name)
+        iface.tick()
 
         for row in iface_list.get_model():
             if row[0] == name:
-                row[4] = self.conn.get_interface(name).is_active()
+                row[4] = iface.is_active()
 
         if active[1] is not None:
             currname = active[0].get_value(active[1], 0)
@@ -91,16 +91,22 @@ class vmmInterface(vmmLibvirtObject):
     def get_mac(self):
         return self.xpath("/interface/mac/@address")
 
+    def _kick_conn(self):
+        self.conn.schedule_priority_tick(polliface=True)
+
     def start(self):
         self._backend.create(0)
         self.idle_add(self.refresh_xml)
+        self._kick_conn()
 
     def stop(self):
         self._backend.destroy(0)
         self.idle_add(self.refresh_xml)
+        self._kick_conn()
 
     def delete(self):
         self._backend.undefine()
+        self._kick_conn()
 
     def is_bridge(self):
         typ = self.get_type()
@@ -23,7 +23,6 @@ from gi.repository import GObject
 # pylint: enable=E0611
 
 import logging
-import time
 
 import virtinst
 
@@ -32,8 +31,6 @@ from virtManager.baseclass import vmmGObject
 MEDIA_FLOPPY = "floppy"
 MEDIA_CDROM = "cdrom"
 
-MEDIA_TIMEOUT = 3
-
 
 class vmmMediaDevice(vmmGObject):
     __gsignals__ = {
@@ -123,14 +120,9 @@ class vmmMediaDevice(vmmGObject):
     def tick(self):
         if not self.nodedev_obj:
             return
-
         if not self.nodedev_obj.conn.is_active():
             return
 
-        if (time.time() - self.last_tick) < MEDIA_TIMEOUT:
-            return
-        self.last_tick = time.time()
-
         try:
             self.nodedev_obj.refresh_xml()
             xml = self.nodedev_obj.get_xml()
@@ -492,8 +492,8 @@ class vmmMigrateDialog(vmmGObjectUI):
             self.err.show_err(error,
                               details=details)
         else:
-            self.conn.schedule_priority_tick()
-            destconn.schedule_priority_tick()
+            self.conn.schedule_priority_tick(pollvm=True)
+            destconn.schedule_priority_tick(pollvm=True)
             self.close()
 
     def _async_set_max_downtime(self, vm, max_downtime, migrate_thread):
@@ -86,15 +86,21 @@ class vmmNetwork(vmmLibvirtObject):
         except:
             return ""
 
+    def _kick_conn(self):
+        self.conn.schedule_priority_tick(pollnet=True)
+
     def start(self):
         self._backend.create()
+        self._kick_conn()
 
     def stop(self):
         self._backend.destroy()
+        self._kick_conn()
 
     def delete(self):
         self._backend.undefine()
         self._backend = None
+        self._kick_conn()
 
     def set_autostart(self, value):
         self._backend.setAutostart(value)
@@ -44,3 +44,6 @@ class vmmNodeDevice(vmmLibvirtObject):
         self._virtinst_obj = virtinst.NodeDeviceParser.parse(
             self._backend.XMLDesc(0))
         return self._virtinst_obj
+
+    def tick(self):
+        pass
@@ -83,6 +83,7 @@ class vmmStorageBrowser(vmmGObjectUI):
         self.reset_state(conn)
         self.topwin.set_transient_for(parent)
         self.topwin.present()
+        self.conn.schedule_priority_tick(pollpool=True)
 
     def close(self, ignore1=None, ignore2=None):
         logging.debug("Closing storage browser")
@@ -255,6 +256,8 @@ class vmmStorageBrowser(vmmGObjectUI):
 
     def pool_selected(self, src_ignore=None):
         pool = self.current_pool()
+        pool.tick()
+
         newvol = bool(pool)
         if pool:
             newvol = pool.is_active()
@@ -72,12 +72,17 @@ class vmmStoragePool(vmmLibvirtObject):
     def get_uuid(self):
         return self._uuid
 
+    def _kick_conn(self):
+        self.conn.schedule_priority_tick(pollpool=True)
+
     def start(self):
         self._backend.create(0)
+        self._kick_conn()
         self.idle_add(self.refresh_xml)
 
     def stop(self):
         self._backend.destroy()
+        self._kick_conn()
         self.idle_add(self.refresh_xml)
 
     def delete(self, nodelete=True):
@@ -86,6 +91,7 @@ class vmmStoragePool(vmmLibvirtObject):
         else:
             self._backend.delete(0)
         self._backend = None
+        self._kick_conn()
 
     def set_autostart(self, value):
         self._backend.setAutostart(value)