Mirror of https://salsa.debian.org/freeipa-team/freeipa.git, synced 2025-01-26 16:16:31 -06:00
Remove unused variables in the code
This commit removes unused variables, or renames variables that are expected to be unused by giving them a "_" prefix. It covers only the cases where the fix was easy or where a module had just one unused variable.

Reviewed-By: Florence Blanc-Renaud <frenaud@redhat.com>
Reviewed-By: Stanislav Laznicka <slaznick@redhat.com>
parent 452b08754d
commit 0f88f8fe88
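The two cleanup patterns the diff applies are summarized in a short, self-contained Python sketch below (the helper names are illustrative, not taken from the FreeIPA tree): a leading "_" marks a name that is bound but intentionally unused — tuple unpacking has to keep its arity — and an exception handler drops its "as e" binding when the exception object is never used.

    import os

    def list_update_files(root):
        """Collect '*.update' files under root."""
        found = []
        # os.walk yields (path, dirnames, filenames); dirnames is unused,
        # so the "_" prefix tells readers and linters that is intentional.
        for path, _subdirs, files in os.walk(root):
            found.extend(os.path.join(path, name)
                         for name in files if name.endswith('.update'))
        return found

    def read_optional(path):
        # The exception object is never used, so no "as e" binding is kept.
        try:
            with open(path) as f:
                return f.read()
        except OSError:
            return None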
@@ -49,7 +49,7 @@ def update_metadata_set(log, source_set, target_set):
 def find_unwrapping_key(log, localhsm, wrapping_key_uri):
     wrap_keys = localhsm.find_keys(uri=wrapping_key_uri)
     # find usable unwrapping key with matching ID
-    for key_id, key in wrap_keys.items():
+    for key_id in wrap_keys.keys():
         unwrap_keys = localhsm.find_keys(id=key_id, cka_unwrap=True)
         if len(unwrap_keys) > 0:
             return unwrap_keys.popitem()[1]
@@ -415,7 +415,7 @@ class exuser_find(Method):
         # patter expects them in one dict. We need to arrange that.
         for e in entries:
             e[1]['dn'] = e[0]
-        entries = [e for (dn, e) in entries]
+        entries = [e for (_dn, e) in entries]

         return dict(result=entries, count=len(entries), truncated=truncated)

@@ -34,7 +34,7 @@ def retrieve_keytab(api, ccache_name, oneway_keytab_name, oneway_principal):
     try:
         sssd = pwd.getpwnam(constants.SSSD_USER)
         os.chown(oneway_keytab_name, sssd[2], sssd[3])
-    except KeyError as e:
+    except KeyError:
         # If user 'sssd' does not exist, we don't need to chown from root to sssd
         # because it means SSSD does not run as sssd user
         pass
@@ -113,7 +113,7 @@ def main():
     standard_logging_setup(verbose=True)

     # In 3.0, restarting needs access to api.env
-    (options, argv) = api.bootstrap_with_global_options(context='server')
+    api.bootstrap_with_global_options(context='server')

     add_ca_schema()
     restart_pki_ds()
@@ -232,9 +232,6 @@ def del_link(realm, replica1, replica2, dirman_passwd, force=False):
     print("Deleted replication agreement from '%s' to '%s'" % (replica1, replica2))

 def del_master(realm, hostname, options):
-
     force_del = False
-
     delrepl = None
-
     # 1. Connect to the local dogtag DS server
@@ -258,7 +255,6 @@ def del_master(realm, hostname, options):
             sys.exit(1)
         else:
             print("Unable to connect to replica %s, forcing removal" % hostname)
             force_del = True
-
     # 4. Get list of agreements.
     if delrepl is None:
@@ -89,7 +89,7 @@ def parse_options():
     parser.add_option("--force", dest="force", action="store_true",
                       help="Force install")

-    options, args = parser.parse_args()
+    options, _args = parser.parse_args()
     safe_options = parser.get_safe_opts(options)

     if options.dnssec_master and options.disable_dnssec_master:
@@ -185,7 +185,7 @@ def parse_options():
     parser.add_option("--no-log", dest="log_to_file", action="store_false",
                       default=True, help="Do not log into file")

-    options, args = parser.parse_args()
+    options, _args = parser.parse_args()
     safe_options = parser.get_safe_opts(options)

     if options.master and options.replica:
@@ -99,7 +99,7 @@ class CertUpdate(admintool.AdminTool):
                 from ipaserver.install import cainstance
                 cainstance.add_lightweight_ca_tracking_requests(
                     self.log, lwcas)
-        except Exception as e:
+        except Exception:
             self.log.exception(
                 "Failed to add lightweight CA tracking requests")

@@ -152,7 +152,7 @@ class automountlocation_import(Command):
         The basic idea is to read the master file and create all the maps
         we need, then read each map file and add all the keys for the map.
         """
-        location = self.api.Command['automountlocation_show'](args[0])
+        self.api.Command['automountlocation_show'](args[0])

         result = {'maps':[], 'keys':[], 'skipped':[], 'duplicatekeys':[], 'duplicatemaps':[]}
         maps = {}
@@ -183,7 +183,7 @@ class automountlocation_import(Command):
                     automountkey=unicode(am[0]),
                     automountinformation=unicode(' '.join(am[1:])))
                 result['keys'].append([am[0], u'auto.master'])
-            except errors.DuplicateEntry as e:
+            except errors.DuplicateEntry:
                 if unicode(am[0]) in DEFAULT_KEYS:
                     # ignore conflict when the key was pre-created by the framework
                     pass
@@ -198,7 +198,7 @@ class automountlocation_import(Command):
             try:
                 api.Command['automountmap_add'](args[0], unicode(am[1]))
                 result['maps'].append(am[1])
-            except errors.DuplicateEntry as e:
+            except errors.DuplicateEntry:
                 if unicode(am[1]) in DEFAULT_MAPS:
                     # ignore conflict when the map was pre-created by the framework
                     pass
@@ -18,7 +18,7 @@ class location_show(MethodOverride):

         servers = output.get('servers', {})
         first = True
-        for hostname, details in servers.items():
+        for details in servers.values():
             if first:
                 textui.print_indented(_("Servers details:"), indent=1)
                 first = False
@@ -629,9 +629,6 @@ class vault_archive(Local):
         return self.api.Command.vault_archive_internal.output()

     def forward(self, *args, **options):
-
         name = args[-1]
-
         data = options.get('data')
         input_file = options.get('in')
-
@@ -883,9 +880,6 @@ class vault_retrieve(Local):
         return self.api.Command.vault_retrieve_internal.output()

     def forward(self, *args, **options):
-
         name = args[-1]
-
         output_file = options.get('out')
-
         password = options.get('password')
@@ -103,8 +103,6 @@ class ACI(object):
         lexer = shlex.shlex(aci)
         lexer.wordchars = lexer.wordchars + "."

-        l = []
-
         var = False
         op = "="
         for token in lexer:
@@ -1342,7 +1342,7 @@ cli_plugins = (
 def run(api):
     error = None
     try:
-        (options, argv) = api.bootstrap_with_global_options(context='cli')
+        (_options, argv) = api.bootstrap_with_global_options(context='cli')
         for klass in cli_plugins:
             api.add_plugin(klass)
         api.finalize()
@@ -1186,9 +1186,10 @@ class DN(object):
         # differ in case must yield the same hash value.

         str_dn = ';,'.join([
-            '++'.join(
-                ['=='.join((atype, avalue or '')) for atype,avalue,dummy in rdn]
-            ) for rdn in self.rdns
+            '++'.join([
+                '=='.join((atype, avalue or ''))
+                for atype, avalue, _dummy in rdn
+            ]) for rdn in self.rdns
         ])
         return hash(str_dn.lower())

@@ -1225,9 +1226,7 @@ class DN(object):

     def _cmp_sequence(self, pattern, self_start, pat_len):
         self_idx = self_start
-        self_len = len(self)
         pat_idx = 0
-        while pat_idx < pat_len and self_idx < self_len:
+        while pat_idx < pat_len:  # and self_idx < self_len
             r = cmp_rdns(self.rdns[self_idx], pattern.rdns[pat_idx])
             if r != 0:
@@ -113,7 +113,7 @@ def ca_status(ca_host=None):
     """
     if ca_host is None:
         ca_host = api.env.ca_host
-    status, headers, body = http_request(
+    status, _headers, body = http_request(
         ca_host, 8080, '/ca/admin/ca/getStatus')
     if status == 503:
         # Service temporarily unavailable
@@ -44,8 +44,8 @@ class Graph(object):

         # delete _adjacencies
         del self._adj[vertex]
-        for key, _adj in self._adj.items():
-            _adj[:] = [v for v in _adj if v != vertex]
+        for adj in self._adj.values():
+            adj[:] = [v for v in adj if v != vertex]

         # delete edges
         edges = [e for e in self.edges if e[0] != vertex and e[1] != vertex]
@@ -152,7 +152,7 @@ class ConfigureTool(admintool.AdminTool):
             **kwargs
         )

-        for group, opt_group in groups.items():
+        for opt_group in groups.values():
             parser.add_option_group(opt_group)

         super(ConfigureTool, cls).add_options(parser,
@@ -60,7 +60,7 @@ class Step(Installable):
         raise AttributeError('parent')

     def _install(self):
-        for nothing in self._installer(self.parent):
+        for _nothing in self._installer(self.parent):
             yield from_(super(Step, self)._install())

     @staticmethod
@@ -68,7 +68,7 @@ class Step(Installable):
         yield

     def _uninstall(self):
-        for nothing in self._uninstaller(self.parent):
+        for _nothing in self._uninstaller(self.parent):
             yield from_(super(Step, self)._uninstall())

     @staticmethod
@@ -234,7 +234,7 @@ class NSSConnection(httplib.HTTPConnection, NSSAddressFamilyFallback):
         self.sock.set_ssl_option(ssl.SSL_HANDSHAKE_AS_CLIENT, True)
         try:
             self.sock.set_ssl_version_range(self.tls_version_min, self.tls_version_max)
-        except NSPRError as e:
+        except NSPRError:
             root_logger.error('Failed to set TLS range to %s, %s' % (self.tls_version_min, self.tls_version_max))
             raise
         self.sock.set_ssl_option(ssl_require_safe_negotiation, False)
@@ -742,13 +742,13 @@ class ADTRUSTInstance(service.Service):
         try:
             self.ldap_enable('ADTRUST', self.fqdn, self.dm_password, \
                              self.suffix)
-        except (ldap.ALREADY_EXISTS, errors.DuplicateEntry) as e:
+        except (ldap.ALREADY_EXISTS, errors.DuplicateEntry):
             root_logger.info("ADTRUST Service startup entry already exists.")

         try:
             self.ldap_enable('EXTID', self.fqdn, self.dm_password, \
                              self.suffix)
-        except (ldap.ALREADY_EXISTS, errors.DuplicateEntry) as e:
+        except (ldap.ALREADY_EXISTS, errors.DuplicateEntry):
             root_logger.info("EXTID Service startup entry already exists.")

     def __setup_sub_dict(self):
@@ -1021,7 +1021,7 @@ def load_external_cert(files, subject_base):
     ca_subject = DN(('CN', 'Certificate Authority'), subject_base)
     ca_nickname = None
     cache = {}
-    for nickname, trust_flags in nssdb.list_certs():
+    for nickname, _trust_flags in nssdb.list_certs():
         cert = nssdb.get_cert(nickname, pem=True)

         nss_cert = x509.load_certificate(cert)
@@ -200,7 +200,7 @@ class PBKDF2KeyDerivation(XMLKeyDerivation):
         hash = [0] * mac.digest_size

         # Perform n iterations.
-        for j in xrange(self.iter):
+        for _j in xrange(self.iter):
             tmp = mac.copy()
             tmp.update(last)
             last = tmp.digest()
@@ -286,7 +286,7 @@ class KrbInstance(service.Service):
         )
         try:
             ipautil.run(args, nolog=(self.master_password,), stdin=''.join(dialogue))
-        except ipautil.CalledProcessError as e:
+        except ipautil.CalledProcessError:
             print("Failed to initialize the realm container")

     def __configure_instance(self):
@@ -848,7 +848,7 @@ class LDAPUpdate(object):
     def get_all_files(self, root, recursive=False):
         """Get all update files"""
         f = []
-        for path, subdirs, files in os.walk(root):
+        for path, _subdirs, files in os.walk(root):
             for name in files:
                 if fnmatch.fnmatch(name, "*.update"):
                     f.append(os.path.join(path, name))
@@ -152,7 +152,7 @@ class NTPInstance(service.Service):
                 fd.write(line)
                 continue
             sline = sline.replace(NTPD_OPTS_QUOTE, '')
-            (variable, opts) = sline.split('=', 1)
+            (_variable, opts) = sline.split('=', 1)
             fd.write(NTPD_OPTS_VAR + '="%s %s"\n' % (opts, ' '.join(newopts)))
             done = True
         else:
@@ -66,8 +66,6 @@ class ODSExporterInstance(service.Service):
         self.start_creation()

     def __check_dnssec_status(self):
-        ods_enforcerd = services.knownservices.ods_enforcerd
-
         try:
             self.ods_uid = pwd.getpwnam(constants.ODS_USER).pw_uid
         except KeyError:
@@ -40,7 +40,7 @@ class update_default_range(Updater):
         dn = DN(self.api.env.container_ranges, self.api.env.basedn)
         search_filter = "objectclass=ipaDomainIDRange"
         try:
-            (entries, truncated) = ldap.find_entries(search_filter, [], dn)
+            ldap.find_entries(search_filter, [], dn)
         except errors.NotFound:
             pass
         else:
@@ -85,7 +85,7 @@ class update_default_range(Updater):
         search_filter = "objectclass=dnaSharedConfig"
         attrs = ['dnaHostname', 'dnaRemainingValues']
         try:
-            (entries, truncated) = ldap.find_entries(search_filter, attrs, dn)
+            (entries, _truncated) = ldap.find_entries(search_filter, attrs, dn)
         except errors.NotFound:
             root_logger.warning("default_range: no dnaSharedConfig object found. "
                                 "Cannot check default range size.")
@@ -278,7 +278,6 @@ class update_master_to_dnsforwardzones(DNSUpdater):
     backup_filename = u'dns-master-to-forward-zones-%Y-%m-%d-%H-%M-%S.ldif'

     def execute(self, **options):
-        ldap = self.api.Backend.ldap2
         # check LDAP if forwardzones already uses new semantics
         if not self.version_update_needed(target_version=1):
             # forwardzones already uses new semantics,
|
||||
)
|
||||
|
||||
try:
|
||||
(entries, truncated) = ldap.find_entries(
|
||||
(entries, _truncated) = ldap.find_entries(
|
||||
search_filter, ['ipabaserid'], base_dn,
|
||||
paged_search=True, time_limit=0, size_limit=0)
|
||||
|
||||
|
@@ -433,7 +433,7 @@ class update_managed_permissions(Updater):
         else:
             if 'ipapermissiontype' not in legacy_entry:
                 if is_new:
-                    acientry, acistr = (
+                    _acientry, acistr = (
                         permission_plugin._get_aci_entry_and_string(
                             legacy_entry, notfound_ok=True))
                     try:
@@ -60,7 +60,7 @@ class update_passync_privilege_update(Updater):
                           self.api.env.basedn)

         try:
-            entry = ldap.get_entry(passsync_dn, [''])
+            ldap.get_entry(passsync_dn, [''])
         except errors.NotFound:
             root_logger.debug("PassSync user not found, no update needed")
             sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', True)
|
||||
)
|
||||
|
||||
try:
|
||||
entries, truncated = ldap.find_entries(
|
||||
entries, _truncated = ldap.find_entries(
|
||||
filter=old_style_plugin_search_filter,
|
||||
base_dn=self.plugins_dn,
|
||||
)
|
||||
|
@@ -55,7 +55,7 @@ class update_upload_cacrt(Updater):
             if nickname == ca_nickname and ca_enabled:
                 trust_flags = 'CT,C,C'
             cert = db.get_cert_from_db(nickname, pem=False)
-            trust, ca, eku = certstore.trust_flags_to_key_policy(trust_flags)
+            trust, _ca, eku = certstore.trust_flags_to_key_policy(trust_flags)

             dn = DN(('cn', nickname), ('cn', 'certificates'), ('cn', 'ipa'),
                     ('cn','etc'), self.api.env.basedn)
@@ -125,7 +125,7 @@ def update_schema(schema_files, ldapi=False, dm_password=None,):

     for filename in schema_files:
         log.debug('Processing schema LDIF file %s', filename)
-        dn, new_schema = ldap.schema.subentry.urlfetch(filename)
+        _dn, new_schema = ldap.schema.subentry.urlfetch(filename)

         for attrname, cls in SCHEMA_ELEMENT_CLASSES:
             for oids_set in _get_oid_dependency_order(new_schema, cls):
|
||||
'ipaConfigString': 'enabledService',
|
||||
'cn': svcname}, rules='&')
|
||||
try:
|
||||
entries, trunc = conn.find_entries(filter=query_filter, base_dn=dn)
|
||||
entries, _trunc = conn.find_entries(filter=query_filter, base_dn=dn)
|
||||
except errors.NotFound:
|
||||
return None
|
||||
if len(entries):
|
||||
@@ -523,7 +523,7 @@ class Service(object):
         search_kw = {'ipaConfigString': u'enabledService'}
         filter = self.admin_conn.make_filter(search_kw)
         try:
-            entries, truncated = self.admin_conn.find_entries(
+            entries, _truncated = self.admin_conn.find_entries(
                 filter=filter,
                 attrs_list=['ipaConfigString'],
                 base_dn=entry_dn,
@@ -293,7 +293,7 @@ class config_mod(LDAPUpdate):
                           for field in entry_attrs[k].split(',')]
             # test if all base types (without sub-types) are allowed
             for a in attributes:
-                a, tomato, olive = a.partition(';')
+                a, _dummy, _dummy = a.partition(';')
                 if a not in allowed_attrs:
                     raise errors.ValidationError(
                         name=k, error=_('attribute "%s" not allowed') % a
@@ -325,7 +325,7 @@ class config_mod(LDAPUpdate):
             if self.api.Object[obj].uuid_attribute:
                 checked_attrs = checked_attrs + [self.api.Object[obj].uuid_attribute]
             for obj_attr in checked_attrs:
-                obj_attr, tomato, olive = obj_attr.partition(';')
+                obj_attr, _dummy, _dummy = obj_attr.partition(';')
                 if obj_attr in OPERATIONAL_ATTRIBUTES:
                     continue
                 if obj_attr in self.api.Object[obj].params and \
@@ -60,7 +60,7 @@ def get_master_entries(ldap, api):
         api.env.basedn
     )

-    masters, _ = ldap.find_entries(
+    masters, _dummy = ldap.find_entries(
        filter="(cn=*)",
        base_dn=container_masters,
        scope=ldap.SCOPE_ONELEVEL,
@@ -329,7 +329,7 @@ class group_del(LDAPDelete):
         assert isinstance(dn, DN)
         config = ldap.get_ipa_config()
         def_primary_group = config.get('ipadefaultprimarygroup', '')
-        def_primary_group_dn = group_dn = self.obj.get_dn(def_primary_group)
+        def_primary_group_dn = self.obj.get_dn(def_primary_group)
         if dn == def_primary_group_dn:
             raise errors.DefaultGroupError()
         group_attrs = self.obj.methods.show(
@@ -411,7 +411,8 @@ class hbactest(Command):
             ldap = self.api.Backend.ldap2
             group_container = DN(api.env.container_group, api.env.basedn)
             try:
-                entries, truncated = ldap.find_entries(filter_sids, ['memberof'], group_container)
+                entries, _truncated = ldap.find_entries(
+                    filter_sids, ['memberof'], group_container)
             except errors.NotFound:
                 request.user.groups = []
             else:
@@ -601,7 +601,7 @@ class host(LDAPObject):
         managed_hosts = []

         try:
-            (hosts, truncated) = ldap.find_entries(
+            (hosts, _truncated) = ldap.find_entries(
                 base_dn=DN(self.container_dn, api.env.basedn),
                 filter=host_filter, attrs_list=host_attrs)

@@ -64,7 +64,7 @@ def validate_permission_to_privilege(api, permission):
         '(objectClass=ipaPermissionV2)', '(!(ipaPermBindRuleType=permission))',
         ldap.make_filter_from_attr('cn', permission, rules='|')])
     try:
-        entries, truncated = ldap.find_entries(
+        entries, _truncated = ldap.find_entries(
             filter=ldapfilter,
             attrs_list=['cn', 'ipapermbindruletype'],
             base_dn=DN(api.env.container_permission, api.env.basedn),
@@ -109,7 +109,7 @@ def validate_selinuxuser(ugettext, user):

     # If we add in ::: we don't have to check to see if some values are
     # empty
-    (name, mls, mcs, ignore) = (user + ':::').split(':', 3)
+    (name, mls, mcs, _ignore) = (user + ':::').split(':', 3)

     if not regex_name.match(name):
         return _('Invalid SELinux user name, only a-Z and _ are allowed')
|
||||
follow_name_owner_changes=True)
|
||||
server = dbus.Interface(obj, 'org.freeipa.server')
|
||||
|
||||
ret, stdout, stderr = server.conncheck(keys[-1])
|
||||
ret, stdout, _stderr = server.conncheck(keys[-1])
|
||||
|
||||
result = dict(
|
||||
result=(ret == 0),
|
||||
|
@@ -284,7 +284,7 @@ def check_required_principal(ldap, principal):
     """
     try:
         host_is_master(ldap, principal.hostname)
-    except errors.ValidationError as e:
+    except errors.ValidationError:
         service_types = ['HTTP', 'ldap', 'DNS', 'dogtagldap']
         if principal.service_name in service_types:
             raise errors.ValidationError(name='principal', error=_('This principal is required by the IPA master'))
@@ -164,7 +164,7 @@ class sudocmd_del(LDAPDelete):
                                 ldap.MATCH_ALL)
         dependent_sudorules = []
         try:
-            entries, truncated = ldap.find_entries(
+            entries, _truncated = ldap.find_entries(
                 filter, ['cn'],
                 base_dn=DN(api.env.container_sudorule, api.env.basedn))
         except errors.NotFound:
|
||||
|
||||
try:
|
||||
session_cookie = Cookie.get_named_cookie_from_string(cookie_header, self.session_cookie_name)
|
||||
except Exception as e:
|
||||
except Exception:
|
||||
session_cookie = None
|
||||
if session_cookie:
|
||||
session_id = session_cookie.value
|
||||
|