Mirror of https://salsa.debian.org/freeipa-team/freeipa.git (synced 2025-02-25 18:55:28 -06:00)
logging: do not reference loggers in arguments and attributes
Remove logger arguments in all functions and logger attributes in all objects, with the exception of the API object logger, which is now deprecated. Replace affected logger calls with module-level logger calls.

Reviewed-By: Martin Basti <mbasti@redhat.com>
Committed by: Martin Basti
Parent: bccb243b05
Commit: ab9d1e75fc
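The pattern applied throughout the diff below is simple: instead of passing a logger into functions or storing one on objects (self.log, self.logger, api.log), each module obtains its own logger once at import time and calls it directly. A minimal sketch of the convention, using an illustrative function and parameter names that are not taken from this commit:

import logging

logger = logging.getLogger(__name__)


def sync_metadata(name, source, target):
    """Copy changed attributes from source to target, logging via the module logger."""
    for attr, value in source.items():
        if target.get(attr) != value:
            # The object name travels in the log message itself instead of
            # being baked into a per-object logger attribute.
            logger.debug('%s: updating attribute %s', name, attr)
            target[attr] = value

Callers no longer construct or thread a logger through; output is controlled per module via the standard logging hierarchy (e.g. logging.basicConfig(level=logging.DEBUG) in a script).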
@@ -22,6 +22,7 @@
 # Make sure we only run this module at the server where samba4-python
 # package is installed to avoid issues with unavailable modules

+import logging
 import re
 import time

@@ -76,6 +77,8 @@ The code in this module relies heavily on samba4-python package
 and Samba4 python bindings.
 """)

+logger = logging.getLogger(__name__)
+
 # Both constants can be used as masks against trust direction
 # because bi-directional has two lower bits set.
 TRUST_ONEWAY = 1
@@ -257,8 +260,8 @@ class DomainValidator(object):
             except KeyError as exc:
                 # Some piece of trusted domain info in LDAP is missing
                 # Skip the domain, but leave log entry for investigation
-                api.log.warning("Trusted domain '%s' entry misses an "
-                                "attribute: %s", e.dn, exc)
+                logger.warning("Trusted domain '%s' entry misses an "
+                               "attribute: %s", e.dn, exc)
                 continue

             result[t_partner] = (fname_norm,
@@ -2,8 +2,12 @@
 # Copyright (C) 2014 FreeIPA Contributors see COPYING for license
 #

+import logging
+
 from ipaserver import p11helper as _ipap11helper

+logger = logging.getLogger(__name__)
+
 attrs_id2name = {
     #_ipap11helper.CKA_ALLOWED_MECHANISMS: 'ipk11allowedmechanisms',
     _ipap11helper.CKA_ALWAYS_AUTHENTICATE: 'ipk11alwaysauthenticate',
@@ -106,7 +110,8 @@ modifiable_attrs_id2name = {

 modifiable_attrs_name2id = {v: k for k, v in modifiable_attrs_id2name.items()}

-def sync_pkcs11_metadata(log, source, target):
+
+def sync_pkcs11_metadata(name, source, target):
     """sync ipk11 metadata from source object to target object"""

     # iterate over list of modifiable PKCS#11 attributes - this prevents us
@@ -114,7 +119,11 @@ def sync_pkcs11_metadata(log, source, target):
     for attr in modifiable_attrs_name2id:
         if attr in source:
             if source[attr] != target[attr]:
-                log.debug('Updating attribute %s from "%s" to "%s"', attr, repr(source[attr]), repr(target[attr]))
+                logger.debug('%s: Updating attribute %s from "%s" to "%s"',
+                             name,
+                             attr,
+                             repr(source[attr]),
+                             repr(target[attr]))
                 target[attr] = source[attr]

 def populate_pkcs11_metadata(source, target):
@@ -123,7 +123,6 @@ class Key(collections.MutableMapping):
         self._delentry = None # indicates that object was deleted
         self.ldap = ldap
         self.ldapkeydb = ldapkeydb
-        self.log = logger.getChild(__name__)

     def __assert_not_deleted(self):
         assert self.entry and not self._delentry, (
@@ -197,9 +196,9 @@ class Key(collections.MutableMapping):
         assert not self.entry, (
             "Key._delete_key() called before Key.schedule_deletion()")
         assert self._delentry, "Key._delete_key() called more than once"
-        self.log.debug('deleting key id 0x%s DN %s from LDAP',
-                       hexlify(self._delentry.single_value['ipk11id']),
-                       self._delentry.dn)
+        logger.debug('deleting key id 0x%s DN %s from LDAP',
+                     hexlify(self._delentry.single_value['ipk11id']),
+                     self._delentry.dn)
         self.ldap.delete_entry(self._delentry)
         self._delentry = None
         self.ldap = None
@@ -259,10 +258,11 @@ class MasterKey(Key):
                              ipaWrappingKey=wrapping_key_uri,
                              ipaWrappingMech=wrapping_mech)

-        self.log.info('adding master key 0x%s wrapped with replica key 0x%s to %s',
-                      hexlify(self['ipk11id']),
-                      hexlify(replica_key_id),
-                      entry_dn)
+        logger.info('adding master key 0x%s wrapped with replica key 0x%s to '
+                    '%s',
+                    hexlify(self['ipk11id']),
+                    hexlify(replica_key_id),
+                    entry_dn)
         self.ldap.add_entry(entry)
         if 'ipaSecretKeyRef' not in self.entry:
             self.entry['objectClass'] += ['ipaSecretKeyRefObject']
@@ -270,10 +270,9 @@ class MasterKey(Key):


 class LdapKeyDB(AbstractHSM):
-    def __init__(self, log, ldap, base_dn):
+    def __init__(self, ldap, base_dn):
         self.ldap = ldap
         self.base_dn = base_dn
-        self.log = log
         self.cache_replica_pubkeys_wrap = None
         self.cache_masterkeys = None
         self.cache_zone_keypairs = None
@@ -348,7 +347,7 @@ class LdapKeyDB(AbstractHSM):
         new_key = self._import_keys_metadata(
             [(mkey, _ipap11helper.KEY_CLASS_SECRET_KEY)])
         self.ldap.add_entry(new_key.entry)
-        self.log.debug('imported master key metadata: %s', new_key.entry)
+        logger.debug('imported master key metadata: %s', new_key.entry)

     def import_zone_key(self, pubkey, pubkey_data, privkey,
                         privkey_wrapped_data, wrapping_mech, master_key_id):
@@ -366,7 +365,7 @@ class LdapKeyDB(AbstractHSM):
         new_key.entry['ipaPublicKey'] = pubkey_data

         self.ldap.add_entry(new_key.entry)
-        self.log.debug('imported zone key id: 0x%s', hexlify(new_key['ipk11id']))
+        logger.debug('imported zone key id: 0x%s', hexlify(new_key['ipk11id']))

     @property
     def replica_pubkeys_wrap(self):
@@ -431,9 +430,10 @@ if __name__ == '__main__':
     ldap.gssapi_bind()
     log.debug('Connected')

-    ldapkeydb = LdapKeyDB(log, ldap, DN(('cn', 'keys'), ('cn', 'sec'),
-                          ipalib.api.env.container_dns,
-                          ipalib.api.env.basedn))
+    ldapkeydb = LdapKeyDB(ldap, DN(('cn', 'keys'),
+                                   ('cn', 'sec'),
+                                   ipalib.api.env.container_dns,
+                                   ipalib.api.env.basedn))

     print('replica public keys: CKA_WRAP = TRUE')
     print('====================================')
@@ -7,7 +7,6 @@ from __future__ import print_function

 from binascii import hexlify
 import collections
-import logging
 import os
 from pprint import pprint

@@ -94,7 +93,6 @@ class LocalHSM(AbstractHSM):
     def __init__(self, library, label, pin):
         self.cache_replica_pubkeys = None
         self.p11 = _ipap11helper.P11_Helper(label, pin, library)
-        self.log = logging.getLogger()

     def __del__(self):
         self.p11.finalize()
@@ -503,7 +503,7 @@ def check_forwarders(dns_forwarders, logger):
     for forwarder in dns_forwarders:
         logger.debug("Checking DNS server: %s", forwarder)
         try:
-            validate_dnssec_global_forwarder(forwarder, log=logger)
+            validate_dnssec_global_forwarder(forwarder)
         except DNSSECSignatureMissingError as e:
             forwarders_dnssec_valid = False
             logger.warning("DNS server %s does not support DNSSEC: %s",
@@ -4,6 +4,7 @@

 from __future__ import print_function

+import logging
 import os
 import pwd
 import grp
@@ -25,6 +26,8 @@ from ipalib import errors, api
 from ipalib.constants import SOFTHSM_DNSSEC_TOKEN_LABEL
 from ipaserver.install.bindinstance import dns_container_exists

+logger = logging.getLogger(__name__)
+
 replica_keylabel_template = u"dnssec-replica:%s"

@@ -51,7 +54,6 @@ class DNSKeySyncInstance(service.Service):
             service_prefix=u'ipa-dnskeysyncd',
             keytab=paths.IPA_DNSKEYSYNCD_KEYTAB
         )
-        self.logger = logger
         self.extra_config = [u'dnssecVersion 1', ]  # DNSSEC enabled
         self.named_uid = None
         self.named_gid = None
@@ -156,7 +158,7 @@ class DNSKeySyncInstance(service.Service):
         """
         if dnssec_container_exists(self.suffix):
-            self.logger.info("DNSSEC container exists (step skipped)")
+            logger.info("DNSSEC container exists (step skipped)")
             return

         self._ldap_mod("dnssec.ldif", {'SUFFIX': self.suffix, })
@@ -169,7 +171,7 @@ class DNSKeySyncInstance(service.Service):

         # create dnssec directory
         if not os.path.exists(paths.IPA_DNSSEC_DIR):
-            self.logger.debug("Creating %s directory", paths.IPA_DNSSEC_DIR)
+            logger.debug("Creating %s directory", paths.IPA_DNSSEC_DIR)
             os.mkdir(paths.IPA_DNSSEC_DIR)
             os.chmod(paths.IPA_DNSSEC_DIR, 0o770)
             # chown ods:named
@@ -182,7 +184,7 @@ class DNSKeySyncInstance(service.Service):
                             "objectstore.backend = file") % {
                                'tokens_dir': paths.DNSSEC_TOKENS_DIR
                             }
-        self.logger.debug("Creating new softhsm config file")
+        logger.debug("Creating new softhsm config file")
         named_fd = open(paths.DNSSEC_SOFTHSM2_CONF, 'w')
         named_fd.seek(0)
         named_fd.truncate(0)
@@ -208,13 +210,12 @@ class DNSKeySyncInstance(service.Service):

         # remove old tokens
         if token_dir_exists:
-            self.logger.debug('Removing old tokens directory %s',
-                              paths.DNSSEC_TOKENS_DIR)
+            logger.debug('Removing old tokens directory %s',
+                         paths.DNSSEC_TOKENS_DIR)
             shutil.rmtree(paths.DNSSEC_TOKENS_DIR)

         # create tokens subdirectory
-        self.logger.debug('Creating tokens %s directory',
-                          paths.DNSSEC_TOKENS_DIR)
+        logger.debug('Creating tokens %s directory', paths.DNSSEC_TOKENS_DIR)
         # sticky bit is required by daemon
         os.mkdir(paths.DNSSEC_TOKENS_DIR)
         os.chmod(paths.DNSSEC_TOKENS_DIR, 0o770 | stat.S_ISGID)
@@ -228,7 +229,7 @@ class DNSKeySyncInstance(service.Service):
         pin_so = ipautil.ipa_generate_password(
             entropy_bits=0, special=None, min_len=pin_length)

-        self.logger.debug("Saving user PIN to %s", paths.DNSSEC_SOFTHSM_PIN)
+        logger.debug("Saving user PIN to %s", paths.DNSSEC_SOFTHSM_PIN)
         named_fd = open(paths.DNSSEC_SOFTHSM_PIN, 'w')
         named_fd.seek(0)
         named_fd.truncate(0)
@@ -238,7 +239,7 @@ class DNSKeySyncInstance(service.Service):
         # chown to ods:named
         os.chown(paths.DNSSEC_SOFTHSM_PIN, self.ods_uid, self.named_gid)

-        self.logger.debug("Saving SO PIN to %s", paths.DNSSEC_SOFTHSM_PIN_SO)
+        logger.debug("Saving SO PIN to %s", paths.DNSSEC_SOFTHSM_PIN_SO)
         named_fd = open(paths.DNSSEC_SOFTHSM_PIN_SO, 'w')
         named_fd.seek(0)
         named_fd.truncate(0)
@@ -257,7 +258,7 @@ class DNSKeySyncInstance(service.Service):
             '--pin', pin,
             '--so-pin', pin_so,
         ]
-        self.logger.debug("Initializing tokens")
+        logger.debug("Initializing tokens")
         os.environ["SOFTHSM2_CONF"] = paths.DNSSEC_SOFTHSM2_CONF
         ipautil.run(command, nolog=(pin, pin_so,))

@@ -277,7 +278,7 @@ class DNSKeySyncInstance(service.Service):

         try:
             # generate replica keypair
-            self.logger.debug("Creating replica's key pair")
+            logger.debug("Creating replica's key pair")
             key_id = None
             while True:
                 # check if key with this ID exist in softHSM
@@ -325,14 +326,14 @@ class DNSKeySyncInstance(service.Service):
                 'ipk11VerifyRecover': [False],
             }

-            self.logger.debug("Storing replica public key to LDAP, %s",
-                              replica_pubkey_dn)
+            logger.debug("Storing replica public key to LDAP, %s",
+                         replica_pubkey_dn)

             entry = ldap.make_entry(replica_pubkey_dn, **kw)
             ldap.add_entry(entry)
-            self.logger.debug("Replica public key stored")
+            logger.debug("Replica public key stored")

-            self.logger.debug("Setting CKA_WRAP=False for old replica keys")
+            logger.debug("Setting CKA_WRAP=False for old replica keys")
             # first create new keys, we don't want disable keys before, we
             # have new keys in softhsm and LDAP

@@ -366,7 +367,7 @@ class DNSKeySyncInstance(service.Service):
             p11.finalize()

         # change tokens mod/owner
-        self.logger.debug("Changing ownership of token files")
+        logger.debug("Changing ownership of token files")
         for (root, dirs, files) in os.walk(paths.DNSSEC_TOKENS_DIR):
             for directory in dirs:
                 dir_path = os.path.join(root, directory)
@@ -384,7 +385,7 @@ class DNSKeySyncInstance(service.Service):
             self.ldap_enable('DNSKeySync', self.fqdn, None,
                              self.suffix, self.extra_config)
         except errors.DuplicateEntry:
-            self.logger.error("DNSKeySync service already exists")
+            logger.error("DNSKeySync service already exists")

     def __setup_principal(self):
         assert self.ods_gid is not None
@@ -415,8 +416,8 @@ class DNSKeySyncInstance(service.Service):
         except ldap.TYPE_OR_VALUE_EXISTS:
             pass
         except Exception as e:
-            self.logger.critical("Could not modify principal's %s entry: %s"
-                                 % (dnssynckey_principal_dn, str(e)))
+            logger.critical("Could not modify principal's %s entry: %s",
+                            dnssynckey_principal_dn, str(e))
             raise

         # bind-dyndb-ldap persistent search feature requires both size and time
@@ -429,8 +430,8 @@ class DNSKeySyncInstance(service.Service):
         try:
             api.Backend.ldap2.modify_s(dnssynckey_principal_dn, mod)
         except Exception as e:
-            self.logger.critical("Could not set principal's %s LDAP limits: %s"
-                                 % (dnssynckey_principal_dn, str(e)))
+            logger.critical("Could not set principal's %s LDAP limits: %s",
+                            dnssynckey_principal_dn, str(e))
             raise

     def __start(self):
@@ -438,7 +439,7 @@ class DNSKeySyncInstance(service.Service):
             self.restart()
         except Exception as e:
             print("Failed to start ipa-dnskeysyncd")
-            self.logger.debug("Failed to start ipa-dnskeysyncd: %s", e)
+            logger.debug("Failed to start ipa-dnskeysyncd: %s", e)


     def uninstall(self):
@@ -458,7 +459,7 @@ class DNSKeySyncInstance(service.Service):
             try:
                 self.fstore.restore_file(f)
             except ValueError as error:
-                self.logger.debug(error)
+                logger.debug('%s', error)

         # remove softhsm pin, to make sure new installation will generate
         # new token database
@@ -109,7 +109,6 @@ class RemoveRUVParser(ldif.LDIFParser):
     def __init__(self, input_file, writer):
         ldif.LDIFParser.__init__(self, input_file)
         self.writer = writer
-        self.log = logger

     def handle(self, dn, entry):
         objectclass = None
@@ -125,7 +124,7 @@ class RemoveRUVParser(ldif.LDIFParser):
         if (objectclass and nsuniqueid and
             'nstombstone' in objectclass and
             'ffffffff-ffffffff-ffffffff-ffffffff' in nsuniqueid):
-            self.log.debug("Removing RUV entry %s", dn)
+            logger.debug("Removing RUV entry %s", dn)
             return

         self.writer.unparse(dn, entry)
@@ -414,16 +414,15 @@ class _sssd(object):
         :raise RemoteRetrieveError: if DBus error occurs
         """
         try:
-            self.log = logger
             self._bus = dbus.SystemBus()
             self._users_obj = self._bus.get_object(
                 DBUS_SSSD_NAME, DBUS_SSSD_USERS_PATH)
             self._users_iface = dbus.Interface(
                 self._users_obj, DBUS_SSSD_USERS_IF)
         except dbus.DBusException as e:
-            self.log.error(
-                'Failed to initialize DBus interface {iface}. DBus '
-                'exception is {exc}.'.format(iface=DBUS_SSSD_USERS_IF, exc=e)
+            logger.error(
+                'Failed to initialize DBus interface %s. DBus '
+                'exception is %s.', DBUS_SSSD_USERS_IF, e
             )
             raise errors.RemoteRetrieveError(
                 reason=_('Failed to connect to sssd over SystemBus. '
@@ -469,9 +468,9 @@ class _sssd(object):
             # exception and return an empty list
             if err_name == 'org.freedesktop.sssd.Error.NotFound':
                 return dict()
-            self.log.error(
-                'Failed to use interface {iface}. DBus '
-                'exception is {exc}.'.format(iface=DBUS_SSSD_USERS_IF, exc=e))
+            logger.error(
+                'Failed to use interface %s. DBus '
+                'exception is %s.', DBUS_SSSD_USERS_IF, e)
             raise errors.RemoteRetrieveError(
                 reason=_('Failed to find users over SystemBus. '
                          ' See details in the error_log'))
@@ -145,7 +145,7 @@ def remove_ptr_rec(ipaddr, fqdn):
     Remove PTR record of IP address (ipaddr)
     :return: True if PTR record was removed, False if record was not found
     """
-    api.log.debug('deleting PTR record of ipaddr %s', ipaddr)
+    logger.debug('deleting PTR record of ipaddr %s', ipaddr)
     try:
         revzone, revname = get_reverse_zone(ipaddr)

@@ -155,7 +155,7 @@ def remove_ptr_rec(ipaddr, fqdn):

         api.Command['dnsrecord_del'](revzone, revname, **delkw)
     except (errors.NotFound, errors.AttrValueNotFound):
-        api.log.debug('PTR record of ipaddr %s not found', ipaddr)
+        logger.debug('PTR record of ipaddr %s not found', ipaddr)
         return False

     return True
@@ -193,8 +193,9 @@ def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs
     # See if the gidNumber at least points to a valid group on the remote
     # server.
     if entry_attrs['gidnumber'][0] in invalid_gids:
-        api.log.warning('GID number %s of migrated user %s does not point to a known group.' \
-                        % (entry_attrs['gidnumber'][0], pkey))
+        logger.warning('GID number %s of migrated user %s does not point '
+                       'to a known group.',
+                       entry_attrs['gidnumber'][0], pkey)
     elif entry_attrs['gidnumber'][0] not in valid_gids:
         try:
             remote_entry = ds_ldap.find_entry_by_attr(
@@ -203,15 +204,18 @@ def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs
             )
             valid_gids.add(entry_attrs['gidnumber'][0])
         except errors.NotFound:
-            api.log.warning('GID number %s of migrated user %s does not point to a known group.' \
-                            % (entry_attrs['gidnumber'][0], pkey))
+            logger.warning('GID number %s of migrated user %s does not '
+                           'point to a known group.',
+                           entry_attrs['gidnumber'][0], pkey)
             invalid_gids.add(entry_attrs['gidnumber'][0])
         except errors.SingleMatchExpected as e:
             # GID number matched more groups, this should not happen
-            api.log.warning('GID number %s of migrated user %s should match 1 group, but it matched %d groups' \
-                            % (entry_attrs['gidnumber'][0], pkey, e.found))
+            logger.warning('GID number %s of migrated user %s should '
+                           'match 1 group, but it matched %d groups',
+                           entry_attrs['gidnumber'][0], pkey, e.found)
         except errors.LimitsExceeded as e:
-            api.log.warning('Search limit exceeded searching for GID %s' % entry_attrs['gidnumber'][0])
+            logger.warning('Search limit exceeded searching for GID %s',
+                           entry_attrs['gidnumber'][0])

     # We don't want to create a UPG so set the magic value in description
     # to let the DS plugin know.
@@ -254,19 +258,22 @@ def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs
             # value is not DN instance, the automatic encoding may have
             # failed due to missing schema or the remote attribute type OID was
             # not detected as DN type. Try to work this around
-            api.log.debug('%s: value %s of type %s in attribute %s is not a DN'
-                          ', convert it', pkey, value, type(value), attr)
+            logger.debug('%s: value %s of type %s in attribute %s is '
+                         'not a DN, convert it',
+                         pkey, value, type(value), attr)
             try:
                 value = DN(value)
             except ValueError as e:
-                api.log.warning('%s: skipping normalization of value %s of type %s '
-                                'in attribute %s which could not be converted to DN: %s',
-                                pkey, value, type(value), attr, e)
+                logger.warning('%s: skipping normalization of value '
+                               '%s of type %s in attribute %s which '
+                               'could not be converted to DN: %s',
+                               pkey, value, type(value), attr, e)
                 continue
             try:
                 remote_entry = ds_ldap.get_entry(value, [api.Object.user.primary_key.name, api.Object.group.primary_key.name])
             except errors.NotFound:
-                api.log.warning('%s: attribute %s refers to non-existent entry %s' % (pkey, attr, value))
+                logger.warning('%s: attribute %s refers to non-existent '
+                               'entry %s', pkey, attr, value)
                 continue
             if value.endswith(search_bases['user']):
                 primary_key = api.Object.user.primary_key.name
@@ -275,14 +282,18 @@ def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs
                 primary_key = api.Object.group.primary_key.name
                 container = api.env.container_group
             else:
-                api.log.warning('%s: value %s in attribute %s does not belong into any known container' % (pkey, value, attr))
+                logger.warning('%s: value %s in attribute %s does not '
+                               'belong into any known container',
+                               pkey, value, attr)
                 continue

             if not remote_entry.get(primary_key):
-                api.log.warning('%s: there is no primary key %s to migrate for %s' % (pkey, primary_key, attr))
+                logger.warning('%s: there is no primary key %s to migrate '
+                               'for %s', pkey, primary_key, attr)
                 continue

-            api.log.debug('converting DN value %s for %s in %s' % (value, attr, dn))
+            logger.debug('converting DN value %s for %s in %s',
+                         value, attr, dn)
             rdnval = remote_entry[primary_key][0].lower()
             entry_attrs[attr][ind] = DN((primary_key, rdnval), container, api.env.basedn)

@@ -318,7 +329,7 @@ def _update_default_group(ldap, ctx, force):
             searchfilter, [''], DN(api.env.container_user, api.env.basedn),
             scope=ldap.SCOPE_SUBTREE, time_limit=-1, size_limit=-1)
     except errors.NotFound:
-        api.log.debug('All users have default group set')
+        logger.debug('All users have default group set')
         return

     member_dns = [m.dn for m in result]
@@ -327,14 +338,14 @@ def _update_default_group(ldap, ctx, force):
         with ldap.error_handler():
             ldap.conn.modify_s(str(group_dn), modlist)
     except errors.DatabaseError as e:
-        api.log.error('Adding new members to default group failed: %s \n'
-                      'members: %s', e, ','.join(member_dns))
+        logger.error('Adding new members to default group failed: %s \n'
+                     'members: %s', e, ','.join(member_dns))

     e = datetime.datetime.now()
     d = e - s
     mode = " (forced)" if force else ""
-    api.log.info('Adding %d users to group%s duration %s',
-                 len(member_dns), mode, d)
+    logger.info('Adding %d users to group%s duration %s',
+                len(member_dns), mode, d)

 # GROUP MIGRATION CALLBACKS AND VARS
@@ -352,24 +363,25 @@ def _pre_migrate_group(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwarg
         except ValueError as e:
             # This should be impossible unless the remote server
             # doesn't enforce syntax checking.
-            api.log.error('Malformed DN %s: %s' % (m, e))
+            logger.error('Malformed DN %s: %s', m, e)
             continue
         try:
             rdnval = m[0].value
         except IndexError:
-            api.log.error('Malformed DN %s has no RDN?' % m)
+            logger.error('Malformed DN %s has no RDN?', m)
            continue

         if m.endswith(search_bases['user']):
-            api.log.debug('migrating %s user %s', member_attr, m)
+            logger.debug('migrating %s user %s', member_attr, m)
             m = DN((api.Object.user.primary_key.name, rdnval),
                    api.env.container_user, api.env.basedn)
         elif m.endswith(search_bases['group']):
-            api.log.debug('migrating %s group %s', member_attr, m)
+            logger.debug('migrating %s group %s', member_attr, m)
             m = DN((api.Object.group.primary_key.name, rdnval),
                    api.env.container_group, api.env.basedn)
         else:
-            api.log.error('entry %s does not belong into any known container' % m)
+            logger.error('entry %s does not belong into any known '
+                         'container', m)
             continue

         new_members.append(m)
@@ -861,8 +873,10 @@ migration process might be incomplete\n''')
         total_dur = e - migration_start
         migrate_cnt += 1
         if migrate_cnt > 0 and migrate_cnt % 100 == 0:
-            api.log.info("%d %ss migrated. %s elapsed." % (migrate_cnt, ldap_obj_name, total_dur))
-            api.log.debug("%d %ss migrated, duration: %s (total %s)" % (migrate_cnt, ldap_obj_name, d, total_dur))
+            logger.info("%d %ss migrated. %s elapsed.",
+                        migrate_cnt, ldap_obj_name, total_dur)
+            logger.debug("%d %ss migrated, duration: %s (total %s)",
+                         migrate_cnt, ldap_obj_name, d, total_dur)

     if 'def_group_dn' in context:
         _update_default_group(ldap, context, True)