Mirror of https://salsa.debian.org/freeipa-team/freeipa.git (synced 2025-02-25 18:55:28 -06:00)
Remove unused variables in the code
This commit removes unused variables, or renames variables that are expected to be unused by giving them a "_" prefix. It covers only the cases where the fix was easy or where a module contained a single unused variable.

Reviewed-By: Florence Blanc-Renaud <frenaud@redhat.com>
Reviewed-By: Stanislav Laznicka <slaznick@redhat.com>
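The pattern is easy to see in miniature. Below is a small illustrative sketch (invented for this page, not code from the FreeIPA tree) of the two clean-ups the commit applies: prefixing intentionally unused bindings with "_", and dropping "except ... as e" bindings that are never read:

entries = [("uid=demo,dc=example", {"cn": "demo"})]

# Before: `dn` was bound but never used, which linters flag:
#     values = [e for (dn, e) in entries]
# After: the underscore prefix marks the binding as deliberately unused.
values = [e for (_dn, e) in entries]

try:
    int("not a number")
except ValueError:  # was `except ValueError as e:`; `e` was never read
    values = []

print(values)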
parent 452b08754d
commit 0f88f8fe88
@@ -49,7 +49,7 @@ def update_metadata_set(log, source_set, target_set):
 def find_unwrapping_key(log, localhsm, wrapping_key_uri):
     wrap_keys = localhsm.find_keys(uri=wrapping_key_uri)
     # find usable unwrapping key with matching ID
-    for key_id, key in wrap_keys.items():
+    for key_id in wrap_keys.keys():
         unwrap_keys = localhsm.find_keys(id=key_id, cka_unwrap=True)
         if len(unwrap_keys) > 0:
             return unwrap_keys.popitem()[1]
@@ -415,7 +415,7 @@ class exuser_find(Method):
         # patter expects them in one dict. We need to arrange that.
         for e in entries:
             e[1]['dn'] = e[0]
-        entries = [e for (dn, e) in entries]
+        entries = [e for (_dn, e) in entries]

         return dict(result=entries, count=len(entries), truncated=truncated)

@@ -34,7 +34,7 @@ def retrieve_keytab(api, ccache_name, oneway_keytab_name, oneway_principal):
     try:
         sssd = pwd.getpwnam(constants.SSSD_USER)
         os.chown(oneway_keytab_name, sssd[2], sssd[3])
-    except KeyError as e:
+    except KeyError:
         # If user 'sssd' does not exist, we don't need to chown from root to sssd
         # because it means SSSD does not run as sssd user
         pass
@@ -113,7 +113,7 @@ def main():
     standard_logging_setup(verbose=True)

     # In 3.0, restarting needs access to api.env
-    (options, argv) = api.bootstrap_with_global_options(context='server')
+    api.bootstrap_with_global_options(context='server')

     add_ca_schema()
     restart_pki_ds()
@@ -232,9 +232,6 @@ def del_link(realm, replica1, replica2, dirman_passwd, force=False):
     print("Deleted replication agreement from '%s' to '%s'" % (replica1, replica2))

 def del_master(realm, hostname, options):
-
-    force_del = False
-
     delrepl = None

     # 1. Connect to the local dogtag DS server
@@ -258,7 +255,6 @@ def del_master(realm, hostname, options):
             sys.exit(1)
         else:
             print("Unable to connect to replica %s, forcing removal" % hostname)
-            force_del = True

     # 4. Get list of agreements.
     if delrepl is None:
@@ -89,7 +89,7 @@ def parse_options():
     parser.add_option("--force", dest="force", action="store_true",
                       help="Force install")

-    options, args = parser.parse_args()
+    options, _args = parser.parse_args()
     safe_options = parser.get_safe_opts(options)

     if options.dnssec_master and options.disable_dnssec_master:
@@ -185,7 +185,7 @@ def parse_options():
     parser.add_option("--no-log", dest="log_to_file", action="store_false",
                       default=True, help="Do not log into file")

-    options, args = parser.parse_args()
+    options, _args = parser.parse_args()
     safe_options = parser.get_safe_opts(options)

     if options.master and options.replica:
@@ -99,7 +99,7 @@ class CertUpdate(admintool.AdminTool):
             from ipaserver.install import cainstance
             cainstance.add_lightweight_ca_tracking_requests(
                 self.log, lwcas)
-        except Exception as e:
+        except Exception:
             self.log.exception(
                 "Failed to add lightweight CA tracking requests")

@@ -152,7 +152,7 @@ class automountlocation_import(Command):
         The basic idea is to read the master file and create all the maps
         we need, then read each map file and add all the keys for the map.
         """
-        location = self.api.Command['automountlocation_show'](args[0])
+        self.api.Command['automountlocation_show'](args[0])

         result = {'maps':[], 'keys':[], 'skipped':[], 'duplicatekeys':[], 'duplicatemaps':[]}
         maps = {}
@@ -183,7 +183,7 @@ class automountlocation_import(Command):
                     automountkey=unicode(am[0]),
                     automountinformation=unicode(' '.join(am[1:])))
                 result['keys'].append([am[0], u'auto.master'])
-            except errors.DuplicateEntry as e:
+            except errors.DuplicateEntry:
                 if unicode(am[0]) in DEFAULT_KEYS:
                     # ignore conflict when the key was pre-created by the framework
                     pass
@@ -198,7 +198,7 @@ class automountlocation_import(Command):
             try:
                 api.Command['automountmap_add'](args[0], unicode(am[1]))
                 result['maps'].append(am[1])
-            except errors.DuplicateEntry as e:
+            except errors.DuplicateEntry:
                 if unicode(am[1]) in DEFAULT_MAPS:
                     # ignore conflict when the map was pre-created by the framework
                     pass
@@ -18,7 +18,7 @@ class location_show(MethodOverride):

         servers = output.get('servers', {})
         first = True
-        for hostname, details in servers.items():
+        for details in servers.values():
             if first:
                 textui.print_indented(_("Servers details:"), indent=1)
                 first = False
@@ -629,9 +629,6 @@ class vault_archive(Local):
         return self.api.Command.vault_archive_internal.output()

     def forward(self, *args, **options):
-
-        name = args[-1]
-
         data = options.get('data')
         input_file = options.get('in')

@@ -883,9 +880,6 @@ class vault_retrieve(Local):
         return self.api.Command.vault_retrieve_internal.output()

     def forward(self, *args, **options):
-
-        name = args[-1]
-
         output_file = options.get('out')

         password = options.get('password')
@@ -103,8 +103,6 @@ class ACI(object):
         lexer = shlex.shlex(aci)
         lexer.wordchars = lexer.wordchars + "."

-        l = []
-
         var = False
         op = "="
         for token in lexer:
@@ -1342,7 +1342,7 @@ cli_plugins = (
 def run(api):
     error = None
     try:
-        (options, argv) = api.bootstrap_with_global_options(context='cli')
+        (_options, argv) = api.bootstrap_with_global_options(context='cli')
         for klass in cli_plugins:
             api.add_plugin(klass)
         api.finalize()
@@ -1186,9 +1186,10 @@ class DN(object):
         # differ in case must yield the same hash value.

         str_dn = ';,'.join([
-            '++'.join(
-                ['=='.join((atype, avalue or '')) for atype,avalue,dummy in rdn]
-            ) for rdn in self.rdns
+            '++'.join([
+                '=='.join((atype, avalue or ''))
+                for atype, avalue, _dummy in rdn
+            ]) for rdn in self.rdns
         ])
         return hash(str_dn.lower())

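The DN.__hash__ hunk above only reflows the nested comprehension and renames the unused third element of each RDN tuple, so the hash input is unchanged. A quick standalone check with invented sample data (not taken from the FreeIPA tree):

rdns = [(('cn', 'admin', None), ('ou', 'people', None)),
        (('dc', 'example', None),)]

old_style = ';,'.join([
    '++'.join(
        ['=='.join((atype, avalue or '')) for atype, avalue, dummy in rdn]
    ) for rdn in rdns
])
new_style = ';,'.join([
    '++'.join([
        '=='.join((atype, avalue or ''))
        for atype, avalue, _dummy in rdn
    ]) for rdn in rdns
])
assert old_style == new_style  # both build the same string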
@@ -1225,9 +1226,7 @@ class DN(object):

     def _cmp_sequence(self, pattern, self_start, pat_len):
         self_idx = self_start
-        self_len = len(self)
         pat_idx = 0
-        # and self_idx < self_len
         while pat_idx < pat_len:
             r = cmp_rdns(self.rdns[self_idx], pattern.rdns[pat_idx])
             if r != 0:
@@ -113,7 +113,7 @@ def ca_status(ca_host=None):
     """
     if ca_host is None:
         ca_host = api.env.ca_host
-    status, headers, body = http_request(
+    status, _headers, body = http_request(
         ca_host, 8080, '/ca/admin/ca/getStatus')
     if status == 503:
         # Service temporarily unavailable
@@ -44,8 +44,8 @@ class Graph(object):

         # delete _adjacencies
         del self._adj[vertex]
-        for key, _adj in self._adj.items():
-            _adj[:] = [v for v in _adj if v != vertex]
+        for adj in self._adj.values():
+            adj[:] = [v for v in adj if v != vertex]

         # delete edges
         edges = [e for e in self.edges if e[0] != vertex and e[1] != vertex]
@@ -152,7 +152,7 @@ class ConfigureTool(admintool.AdminTool):
             **kwargs
         )

-        for group, opt_group in groups.items():
+        for opt_group in groups.values():
             parser.add_option_group(opt_group)

         super(ConfigureTool, cls).add_options(parser,
@@ -60,7 +60,7 @@ class Step(Installable):
         raise AttributeError('parent')

     def _install(self):
-        for nothing in self._installer(self.parent):
+        for _nothing in self._installer(self.parent):
             yield from_(super(Step, self)._install())

     @staticmethod
@@ -68,7 +68,7 @@ class Step(Installable):
         yield

     def _uninstall(self):
-        for nothing in self._uninstaller(self.parent):
+        for _nothing in self._uninstaller(self.parent):
             yield from_(super(Step, self)._uninstall())

     @staticmethod
@@ -234,7 +234,7 @@ class NSSConnection(httplib.HTTPConnection, NSSAddressFamilyFallback):
         self.sock.set_ssl_option(ssl.SSL_HANDSHAKE_AS_CLIENT, True)
         try:
             self.sock.set_ssl_version_range(self.tls_version_min, self.tls_version_max)
-        except NSPRError as e:
+        except NSPRError:
             root_logger.error('Failed to set TLS range to %s, %s' % (self.tls_version_min, self.tls_version_max))
             raise
         self.sock.set_ssl_option(ssl_require_safe_negotiation, False)
@@ -742,13 +742,13 @@ class ADTRUSTInstance(service.Service):
         try:
             self.ldap_enable('ADTRUST', self.fqdn, self.dm_password, \
                              self.suffix)
-        except (ldap.ALREADY_EXISTS, errors.DuplicateEntry) as e:
+        except (ldap.ALREADY_EXISTS, errors.DuplicateEntry):
             root_logger.info("ADTRUST Service startup entry already exists.")

         try:
             self.ldap_enable('EXTID', self.fqdn, self.dm_password, \
                              self.suffix)
-        except (ldap.ALREADY_EXISTS, errors.DuplicateEntry) as e:
+        except (ldap.ALREADY_EXISTS, errors.DuplicateEntry):
             root_logger.info("EXTID Service startup entry already exists.")

     def __setup_sub_dict(self):
@@ -1021,7 +1021,7 @@ def load_external_cert(files, subject_base):
     ca_subject = DN(('CN', 'Certificate Authority'), subject_base)
     ca_nickname = None
     cache = {}
-    for nickname, trust_flags in nssdb.list_certs():
+    for nickname, _trust_flags in nssdb.list_certs():
         cert = nssdb.get_cert(nickname, pem=True)

         nss_cert = x509.load_certificate(cert)
@@ -200,7 +200,7 @@ class PBKDF2KeyDerivation(XMLKeyDerivation):
         hash = [0] * mac.digest_size

         # Perform n iterations.
-        for j in xrange(self.iter):
+        for _j in xrange(self.iter):
             tmp = mac.copy()
             tmp.update(last)
             last = tmp.digest()
@@ -286,7 +286,7 @@ class KrbInstance(service.Service):
         )
         try:
             ipautil.run(args, nolog=(self.master_password,), stdin=''.join(dialogue))
-        except ipautil.CalledProcessError as e:
+        except ipautil.CalledProcessError:
             print("Failed to initialize the realm container")

     def __configure_instance(self):
@@ -848,7 +848,7 @@ class LDAPUpdate(object):
     def get_all_files(self, root, recursive=False):
         """Get all update files"""
         f = []
-        for path, subdirs, files in os.walk(root):
+        for path, _subdirs, files in os.walk(root):
             for name in files:
                 if fnmatch.fnmatch(name, "*.update"):
                     f.append(os.path.join(path, name))
@@ -152,7 +152,7 @@ class NTPInstance(service.Service):
                 fd.write(line)
                 continue
             sline = sline.replace(NTPD_OPTS_QUOTE, '')
-            (variable, opts) = sline.split('=', 1)
+            (_variable, opts) = sline.split('=', 1)
             fd.write(NTPD_OPTS_VAR + '="%s %s"\n' % (opts, ' '.join(newopts)))
             done = True
         else:
@@ -66,8 +66,6 @@ class ODSExporterInstance(service.Service):
         self.start_creation()

     def __check_dnssec_status(self):
-        ods_enforcerd = services.knownservices.ods_enforcerd
-
         try:
             self.ods_uid = pwd.getpwnam(constants.ODS_USER).pw_uid
         except KeyError:
@@ -40,7 +40,7 @@ class update_default_range(Updater):
         dn = DN(self.api.env.container_ranges, self.api.env.basedn)
         search_filter = "objectclass=ipaDomainIDRange"
         try:
-            (entries, truncated) = ldap.find_entries(search_filter, [], dn)
+            ldap.find_entries(search_filter, [], dn)
         except errors.NotFound:
             pass
         else:
@@ -85,7 +85,7 @@ class update_default_range(Updater):
         search_filter = "objectclass=dnaSharedConfig"
         attrs = ['dnaHostname', 'dnaRemainingValues']
         try:
-            (entries, truncated) = ldap.find_entries(search_filter, attrs, dn)
+            (entries, _truncated) = ldap.find_entries(search_filter, attrs, dn)
         except errors.NotFound:
             root_logger.warning("default_range: no dnaSharedConfig object found. "
                                 "Cannot check default range size.")
@@ -278,7 +278,6 @@ class update_master_to_dnsforwardzones(DNSUpdater):
     backup_filename = u'dns-master-to-forward-zones-%Y-%m-%d-%H-%M-%S.ldif'

     def execute(self, **options):
-        ldap = self.api.Backend.ldap2
         # check LDAP if forwardzones already uses new semantics
         if not self.version_update_needed(target_version=1):
             # forwardzones already uses new semantics,
@@ -132,7 +132,7 @@ class update_idrange_baserid(Updater):
         )

         try:
-            (entries, truncated) = ldap.find_entries(
+            (entries, _truncated) = ldap.find_entries(
                 search_filter, ['ipabaserid'], base_dn,
                 paged_search=True, time_limit=0, size_limit=0)

@@ -433,7 +433,7 @@ class update_managed_permissions(Updater):
         else:
             if 'ipapermissiontype' not in legacy_entry:
                 if is_new:
-                    acientry, acistr = (
+                    _acientry, acistr = (
                         permission_plugin._get_aci_entry_and_string(
                             legacy_entry, notfound_ok=True))
                     try:
@@ -60,7 +60,7 @@ class update_passync_privilege_update(Updater):
                           self.api.env.basedn)

         try:
-            entry = ldap.get_entry(passsync_dn, [''])
+            ldap.get_entry(passsync_dn, [''])
         except errors.NotFound:
             root_logger.debug("PassSync user not found, no update needed")
             sysupgrade.set_upgrade_state('winsync', 'passsync_privilege_updated', True)
@@ -177,7 +177,7 @@ class update_uniqueness_plugins_to_new_syntax(Updater):
         )

         try:
-            entries, truncated = ldap.find_entries(
+            entries, _truncated = ldap.find_entries(
                 filter=old_style_plugin_search_filter,
                 base_dn=self.plugins_dn,
             )
@@ -55,7 +55,7 @@ class update_upload_cacrt(Updater):
             if nickname == ca_nickname and ca_enabled:
                 trust_flags = 'CT,C,C'
             cert = db.get_cert_from_db(nickname, pem=False)
-            trust, ca, eku = certstore.trust_flags_to_key_policy(trust_flags)
+            trust, _ca, eku = certstore.trust_flags_to_key_policy(trust_flags)

             dn = DN(('cn', nickname), ('cn', 'certificates'), ('cn', 'ipa'),
                     ('cn','etc'), self.api.env.basedn)
@@ -125,7 +125,7 @@ def update_schema(schema_files, ldapi=False, dm_password=None,):

     for filename in schema_files:
         log.debug('Processing schema LDIF file %s', filename)
-        dn, new_schema = ldap.schema.subentry.urlfetch(filename)
+        _dn, new_schema = ldap.schema.subentry.urlfetch(filename)

         for attrname, cls in SCHEMA_ELEMENT_CLASSES:
             for oids_set in _get_oid_dependency_order(new_schema, cls):
@@ -116,7 +116,7 @@ def find_providing_server(svcname, conn, host_name=None, api=api):
         'ipaConfigString': 'enabledService',
         'cn': svcname}, rules='&')
     try:
-        entries, trunc = conn.find_entries(filter=query_filter, base_dn=dn)
+        entries, _trunc = conn.find_entries(filter=query_filter, base_dn=dn)
     except errors.NotFound:
         return None
     if len(entries):
@@ -523,7 +523,7 @@ class Service(object):
         search_kw = {'ipaConfigString': u'enabledService'}
         filter = self.admin_conn.make_filter(search_kw)
         try:
-            entries, truncated = self.admin_conn.find_entries(
+            entries, _truncated = self.admin_conn.find_entries(
                 filter=filter,
                 attrs_list=['ipaConfigString'],
                 base_dn=entry_dn,
@@ -293,7 +293,7 @@ class config_mod(LDAPUpdate):
                           for field in entry_attrs[k].split(',')]
             # test if all base types (without sub-types) are allowed
             for a in attributes:
-                a, tomato, olive = a.partition(';')
+                a, _dummy, _dummy = a.partition(';')
                 if a not in allowed_attrs:
                     raise errors.ValidationError(
                         name=k, error=_('attribute "%s" not allowed') % a
@@ -325,7 +325,7 @@ class config_mod(LDAPUpdate):
             if self.api.Object[obj].uuid_attribute:
                 checked_attrs = checked_attrs + [self.api.Object[obj].uuid_attribute]
             for obj_attr in checked_attrs:
-                obj_attr, tomato, olive = obj_attr.partition(';')
+                obj_attr, _dummy, _dummy = obj_attr.partition(';')
                 if obj_attr in OPERATIONAL_ATTRIBUTES:
                     continue
                 if obj_attr in self.api.Object[obj].params and \
@@ -60,7 +60,7 @@ def get_master_entries(ldap, api):
         api.env.basedn
     )

-    masters, _ = ldap.find_entries(
+    masters, _dummy = ldap.find_entries(
         filter="(cn=*)",
         base_dn=container_masters,
         scope=ldap.SCOPE_ONELEVEL,
@@ -329,7 +329,7 @@ class group_del(LDAPDelete):
         assert isinstance(dn, DN)
         config = ldap.get_ipa_config()
         def_primary_group = config.get('ipadefaultprimarygroup', '')
-        def_primary_group_dn = group_dn = self.obj.get_dn(def_primary_group)
+        def_primary_group_dn = self.obj.get_dn(def_primary_group)
         if dn == def_primary_group_dn:
             raise errors.DefaultGroupError()
         group_attrs = self.obj.methods.show(
@@ -411,7 +411,8 @@ class hbactest(Command):
         ldap = self.api.Backend.ldap2
         group_container = DN(api.env.container_group, api.env.basedn)
         try:
-            entries, truncated = ldap.find_entries(filter_sids, ['memberof'], group_container)
+            entries, _truncated = ldap.find_entries(
+                filter_sids, ['memberof'], group_container)
         except errors.NotFound:
             request.user.groups = []
         else:
@@ -601,7 +601,7 @@ class host(LDAPObject):
         managed_hosts = []

         try:
-            (hosts, truncated) = ldap.find_entries(
+            (hosts, _truncated) = ldap.find_entries(
                 base_dn=DN(self.container_dn, api.env.basedn),
                 filter=host_filter, attrs_list=host_attrs)

@@ -64,7 +64,7 @@ def validate_permission_to_privilege(api, permission):
         '(objectClass=ipaPermissionV2)', '(!(ipaPermBindRuleType=permission))',
         ldap.make_filter_from_attr('cn', permission, rules='|')])
     try:
-        entries, truncated = ldap.find_entries(
+        entries, _truncated = ldap.find_entries(
             filter=ldapfilter,
             attrs_list=['cn', 'ipapermbindruletype'],
             base_dn=DN(api.env.container_permission, api.env.basedn),
@@ -109,7 +109,7 @@ def validate_selinuxuser(ugettext, user):

     # If we add in ::: we don't have to check to see if some values are
     # empty
-    (name, mls, mcs, ignore) = (user + ':::').split(':', 3)
+    (name, mls, mcs, _ignore) = (user + ':::').split(':', 3)

     if not regex_name.match(name):
         return _('Invalid SELinux user name, only a-Z and _ are allowed')
@@ -889,7 +889,7 @@ class server_conncheck(crud.PKQuery):
                              follow_name_owner_changes=True)
         server = dbus.Interface(obj, 'org.freeipa.server')

-        ret, stdout, stderr = server.conncheck(keys[-1])
+        ret, stdout, _stderr = server.conncheck(keys[-1])

         result = dict(
             result=(ret == 0),
@@ -284,7 +284,7 @@ def check_required_principal(ldap, principal):
     """
     try:
         host_is_master(ldap, principal.hostname)
-    except errors.ValidationError as e:
+    except errors.ValidationError:
         service_types = ['HTTP', 'ldap', 'DNS', 'dogtagldap']
         if principal.service_name in service_types:
             raise errors.ValidationError(name='principal', error=_('This principal is required by the IPA master'))
@@ -164,7 +164,7 @@ class sudocmd_del(LDAPDelete):
                                    ldap.MATCH_ALL)
         dependent_sudorules = []
         try:
-            entries, truncated = ldap.find_entries(
+            entries, _truncated = ldap.find_entries(
                 filter, ['cn'],
                 base_dn=DN(api.env.container_sudorule, api.env.basedn))
         except errors.NotFound:
@@ -961,7 +961,7 @@ class MemcacheSessionManager(SessionManager):

         try:
             session_cookie = Cookie.get_named_cookie_from_string(cookie_header, self.session_cookie_name)
-        except Exception as e:
+        except Exception:
             session_cookie = None
         if session_cookie:
             session_id = session_cookie.value