logging: do not reference loggers in arguments and attributes

Remove logger arguments in all functions and logger attributes in all
objects, with the exception of API object logger, which is now deprecated.
Replace affected logger calls with module-level logger calls.

Reviewed-By: Martin Basti <mbasti@redhat.com>
This commit is contained in:
Jan Cholasta 2017-05-24 13:42:23 +00:00 committed by Martin Basti
parent bccb243b05
commit ab9d1e75fc
23 changed files with 325 additions and 265 deletions

View File

@ -47,6 +47,7 @@ For more information see
"""
from __future__ import print_function
import logging
import os
import optparse # pylint: disable=deprecated-module
import ssl
@ -68,6 +69,8 @@ from werkzeug.utils import redirect, append_slash_redirect
from werkzeug.wsgi import DispatcherMiddleware, SharedDataMiddleware
# pylint: enable=import-error
logger = logging.getLogger(os.path.basename(__file__))
BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
IMPORTDIR = os.path.dirname(os.path.dirname(os.path.abspath(ipalib.__file__)))
@ -181,7 +184,7 @@ def init_api(ccname):
)
api.finalize()
api_time = time.time()
api.log.info("API initialized in {:03f} sec".format(api_time - start_time))
logger.info("API initialized in %03f sec", api_time - start_time)
# Validate LDAP connection and pre-fetch schema
# Pre-fetching makes the lite-server behave similar to mod_wsgi. werkzeug's
@ -194,9 +197,9 @@ def init_api(ccname):
if not ldap2.isconnected():
ldap2.connect(ccache=ccname)
except NetworkError as e:
api.log.error("Unable to connect to LDAP: %s", e)
api.log.error("lite-server needs a working LDAP connect. Did you "
"configure ldap_uri in '%s'?", api.env.conf_default)
logger.error("Unable to connect to LDAP: %s", e)
logger.error("lite-server needs a working LDAP connect. Did you "
"configure ldap_uri in '%s'?", api.env.conf_default)
sys.exit(2)
else:
# prefetch schema
@ -205,8 +208,7 @@ def init_api(ccname):
# must have its own connection.
ldap2.disconnect()
ldap_time = time.time()
api.log.info("LDAP schema retrieved {:03f} sec".format(
ldap_time - api_time))
logger.info("LDAP schema retrieved %03f sec", ldap_time - api_time)
def redirect_ui(app):

View File

@ -10,7 +10,7 @@ This program should be run only on replicas, not on DNSSEC masters.
from binascii import hexlify
from gssapi.exceptions import GSSError
import logging
import os
import sys
@ -27,6 +27,8 @@ from ipaserver.dnssec.abshsm import (sync_pkcs11_metadata,
from ipaserver.dnssec.ldapkeydb import LdapKeyDB
from ipaserver.dnssec.localhsm import LocalHSM
logger = logging.getLogger(os.path.basename(__file__))
DAEMONNAME = 'ipa-dnskeysyncd'
PRINCIPAL = None # not initialized yet
WORKDIR = '/tmp'
@ -37,18 +39,19 @@ def hex_set(s):
out.add("0x%s" % hexlify(i))
return out
def update_metadata_set(log, source_set, target_set):
def update_metadata_set(source_set, target_set):
"""sync metadata from source key set to target key set
Keys not present in both sets are left intact."""
log = log.getChild('sync_metadata')
name = 'sync_metadata'
matching_keys = set(source_set.keys()).intersection(set(target_set.keys()))
log.info("keys in local HSM & LDAP: %s", hex_set(matching_keys))
logger.info("%s: keys in local HSM & LDAP: %s",
name, hex_set(matching_keys))
for key_id in matching_keys:
sync_pkcs11_metadata(log, source_set[key_id], target_set[key_id])
sync_pkcs11_metadata(name, source_set[key_id], target_set[key_id])
def find_unwrapping_key(log, localhsm, wrapping_key_uri):
def find_unwrapping_key(localhsm, wrapping_key_uri):
wrap_keys = localhsm.find_keys(uri=wrapping_key_uri)
# find usable unwrapping key with matching ID
for key_id in wrap_keys.keys():
@ -56,20 +59,23 @@ def find_unwrapping_key(log, localhsm, wrapping_key_uri):
if len(unwrap_keys) > 0:
return unwrap_keys.popitem()[1]
def ldap2replica_master_keys_sync(log, ldapkeydb, localhsm):
def ldap2replica_master_keys_sync(ldapkeydb, localhsm):
## LDAP -> replica master key synchronization
# import new master keys from LDAP
new_keys = set(ldapkeydb.master_keys.keys()) \
- set(localhsm.master_keys.keys())
log.debug("master keys in local HSM: %s", hex_set(localhsm.master_keys.keys()))
log.debug("master keys in LDAP HSM: %s", hex_set(ldapkeydb.master_keys.keys()))
log.debug("new master keys in LDAP HSM: %s", hex_set(new_keys))
logger.debug("master keys in local HSM: %s",
hex_set(localhsm.master_keys.keys()))
logger.debug("master keys in LDAP HSM: %s",
hex_set(ldapkeydb.master_keys.keys()))
logger.debug("new master keys in LDAP HSM: %s",
hex_set(new_keys))
for mkey_id in new_keys:
mkey_ldap = ldapkeydb.master_keys[mkey_id]
assert mkey_ldap.wrapped_entries, "Master key 0x%s in LDAP is missing key material referenced by ipaSecretKeyRefObject attribute" % hexlify(mkey_id)
for wrapped_ldap in mkey_ldap.wrapped_entries:
unwrapping_key = find_unwrapping_key(log, localhsm,
wrapped_ldap.single_value['ipaWrappingKey'])
unwrapping_key = find_unwrapping_key(
localhsm, wrapped_ldap.single_value['ipaWrappingKey'])
if unwrapping_key:
break
@ -80,38 +86,42 @@ def ldap2replica_master_keys_sync(log, ldapkeydb, localhsm):
params['data'] = wrapped_ldap.single_value['ipaSecretKey']
params['unwrapping_key'] = unwrapping_key.handle
params['wrapping_mech'] = wrappingmech_name2id[wrapped_ldap.single_value['ipaWrappingMech']]
log.debug('Importing new master key: 0x%s %s', hexlify(mkey_id), params)
logger.debug('Importing new master key: 0x%s %s',
hexlify(mkey_id), params)
localhsm.p11.import_wrapped_secret_key(**params)
# synchronize metadata about master keys in LDAP
update_metadata_set(log, ldapkeydb.master_keys, localhsm.master_keys)
update_metadata_set(ldapkeydb.master_keys, localhsm.master_keys)
def ldap2replica_zone_keys_sync(log, ldapkeydb, localhsm):
def ldap2replica_zone_keys_sync(ldapkeydb, localhsm):
## LDAP -> replica zone key synchronization
# import new zone keys from LDAP
new_keys = set(ldapkeydb.zone_keypairs.keys()) \
- set(localhsm.zone_privkeys.keys())
log.debug("zone keys in local HSM: %s", hex_set(localhsm.master_keys.keys()))
log.debug("zone keys in LDAP HSM: %s", hex_set(ldapkeydb.master_keys.keys()))
log.debug("new zone keys in LDAP HSM: %s", hex_set(new_keys))
logger.debug("zone keys in local HSM: %s",
hex_set(localhsm.master_keys.keys()))
logger.debug("zone keys in LDAP HSM: %s",
hex_set(ldapkeydb.master_keys.keys()))
logger.debug("new zone keys in LDAP HSM: %s",
hex_set(new_keys))
for zkey_id in new_keys:
zkey_ldap = ldapkeydb.zone_keypairs[zkey_id]
log.debug('Looking for unwrapping key "%s" for zone key 0x%s',
zkey_ldap['ipaWrappingKey'], hexlify(zkey_id))
unwrapping_key = find_unwrapping_key(log, localhsm,
zkey_ldap['ipaWrappingKey'])
logger.debug('Looking for unwrapping key "%s" for zone key 0x%s',
zkey_ldap['ipaWrappingKey'], hexlify(zkey_id))
unwrapping_key = find_unwrapping_key(
localhsm, zkey_ldap['ipaWrappingKey'])
assert unwrapping_key is not None, \
"Local HSM does not contain suitable unwrapping key for " \
"zone key 0x%s" % hexlify(zkey_id)
log.debug('Importing zone key pair 0x%s', hexlify(zkey_id))
logger.debug('Importing zone key pair 0x%s', hexlify(zkey_id))
localhsm.import_private_key(zkey_ldap, zkey_ldap['ipaPrivateKey'],
unwrapping_key)
localhsm.import_public_key(zkey_ldap, zkey_ldap['ipaPublicKey'])
# synchronize metadata about zone keys in LDAP & local HSM
update_metadata_set(log, ldapkeydb.master_keys, localhsm.master_keys)
update_metadata_set(ldapkeydb.master_keys, localhsm.master_keys)
# delete keys removed from LDAP
deleted_keys = set(localhsm.zone_privkeys.keys()) \
@ -151,14 +161,15 @@ log.debug('Connected')
### DNSSEC master: key synchronization
ldapkeydb = LdapKeyDB(log, ldap,
DN(('cn', 'keys'), ('cn', 'sec'), ipalib.api.env.container_dns,
ipalib.api.env.basedn))
ldapkeydb = LdapKeyDB(ldap, DN(('cn', 'keys'),
('cn', 'sec'),
ipalib.api.env.container_dns,
ipalib.api.env.basedn))
localhsm = LocalHSM(paths.LIBSOFTHSM2_SO, SOFTHSM_DNSSEC_TOKEN_LABEL,
open(paths.DNSSEC_SOFTHSM_PIN).read())
ldap2replica_master_keys_sync(log, ldapkeydb, localhsm)
ldap2replica_zone_keys_sync(log, ldapkeydb, localhsm)
ldap2replica_master_keys_sync(ldapkeydb, localhsm)
ldap2replica_zone_keys_sync(ldapkeydb, localhsm)
sys.exit(0)

View File

@ -42,6 +42,8 @@ from ipaserver.dnssec.abshsm import sync_pkcs11_metadata, wrappingmech_name2id
from ipaserver.dnssec.ldapkeydb import LdapKeyDB
from ipaserver.dnssec.localhsm import LocalHSM
logger = logging.getLogger(os.path.basename(__file__))
DAEMONNAME = 'ipa-ods-exporter'
PRINCIPAL = None # not initialized yet
WORKDIR = os.path.join(paths.VAR_OPENDNSSEC_DIR, 'tmp')
@ -269,76 +271,86 @@ def get_ods_keys(zone_name):
key_data.update(sql2ldap_keyid(row['HSMkey_id']))
keys[key_id] = key_data
log.debug("key %s metadata: %s", key_id, key_data)
logger.debug("key %s metadata: %s", key_id, key_data)
return keys
def sync_set_metadata_2ldap(log, source_set, target_set):
def sync_set_metadata_2ldap(name, source_set, target_set):
"""sync metadata from source key set to target key set in LDAP
Keys not present in both sets are left intact."""
log = log.getChild('sync_set_metadata_2ldap')
matching_keys = set(source_set.keys()).intersection(set(target_set.keys()))
log.info("keys in local HSM & LDAP: %s", hex_set(matching_keys))
logger.info("%s: keys in local HSM & LDAP: %s",
name, hex_set(matching_keys))
for key_id in matching_keys:
sync_pkcs11_metadata(log, source_set[key_id], target_set[key_id])
sync_pkcs11_metadata(name, source_set[key_id], target_set[key_id])
def ldap2master_replica_keys_sync(log, ldapkeydb, localhsm):
def ldap2master_replica_keys_sync(ldapkeydb, localhsm):
"""LDAP=>master's local HSM replica key synchronization"""
# import new replica keys from LDAP
log = log.getChild('ldap2master_replica')
log.debug("replica pub keys in LDAP: %s", hex_set(ldapkeydb.replica_pubkeys_wrap))
log.debug("replica pub keys in SoftHSM: %s", hex_set(localhsm.replica_pubkeys_wrap))
logger.debug("replica pub keys in LDAP: %s",
hex_set(ldapkeydb.replica_pubkeys_wrap))
logger.debug("replica pub keys in SoftHSM: %s",
hex_set(localhsm.replica_pubkeys_wrap))
new_replica_keys = set(ldapkeydb.replica_pubkeys_wrap.keys()) \
- set(localhsm.replica_pubkeys_wrap.keys())
log.info("new replica keys in LDAP: %s", hex_set(new_replica_keys))
logger.info("new replica keys in LDAP: %s",
hex_set(new_replica_keys))
for key_id in new_replica_keys:
new_key_ldap = ldapkeydb.replica_pubkeys_wrap[key_id]
log.debug('label=%s, id=%s, data=%s',
new_key_ldap['ipk11label'],
hexlify(new_key_ldap['ipk11id']),
hexlify(new_key_ldap['ipapublickey']))
logger.debug('label=%s, id=%s, data=%s',
new_key_ldap['ipk11label'],
hexlify(new_key_ldap['ipk11id']),
hexlify(new_key_ldap['ipapublickey']))
localhsm.import_public_key(new_key_ldap, new_key_ldap['ipapublickey'])
# set CKA_WRAP = FALSE for all replica keys removed from LDAP
removed_replica_keys = set(localhsm.replica_pubkeys_wrap.keys()) \
- set(ldapkeydb.replica_pubkeys_wrap.keys())
log.info("obsolete replica keys in local HSM: %s",
hex_set(removed_replica_keys))
logger.info("obsolete replica keys in local HSM: %s",
hex_set(removed_replica_keys))
for key_id in removed_replica_keys:
localhsm.replica_pubkeys_wrap[key_id]['ipk11wrap'] = False
# synchronize replica key attributes from LDAP to local HSM
sync_set_metadata_2ldap(log, localhsm.replica_pubkeys_wrap,
ldapkeydb.replica_pubkeys_wrap)
sync_set_metadata_2ldap('ldap2master_replica',
localhsm.replica_pubkeys_wrap,
ldapkeydb.replica_pubkeys_wrap)
def master2ldap_master_keys_sync(log, ldapkeydb, localhsm):
def master2ldap_master_keys_sync(ldapkeydb, localhsm):
## master key -> LDAP synchronization
# export new master keys to LDAP
new_master_keys = set(localhsm.master_keys.keys()) \
- set(ldapkeydb.master_keys.keys())
log.debug("master keys in local HSM: %s", hex_set(localhsm.master_keys.keys()))
log.debug("master keys in LDAP HSM: %s", hex_set(ldapkeydb.master_keys.keys()))
log.debug("new master keys in local HSM: %s", hex_set(new_master_keys))
logger.debug("master keys in local HSM: %s",
hex_set(localhsm.master_keys.keys()))
logger.debug("master keys in LDAP HSM: %s",
hex_set(ldapkeydb.master_keys.keys()))
logger.debug("new master keys in local HSM: %s",
hex_set(new_master_keys))
for mkey_id in new_master_keys:
mkey = localhsm.master_keys[mkey_id]
ldapkeydb.import_master_key(mkey)
# re-fill cache with keys we just added
ldapkeydb.flush()
log.debug('master keys in LDAP after flush: %s', hex_set(ldapkeydb.master_keys))
logger.debug('master keys in LDAP after flush: %s',
hex_set(ldapkeydb.master_keys))
# synchronize master key metadata to LDAP
for mkey_id, mkey_local in localhsm.master_keys.items():
log.debug('synchronizing master key metadata: 0x%s', hexlify(mkey_id))
sync_pkcs11_metadata(log, mkey_local, ldapkeydb.master_keys[mkey_id])
logger.debug('synchronizing master key metadata: 0x%s',
hexlify(mkey_id))
sync_pkcs11_metadata('master2ldap_master', mkey_local, ldapkeydb.master_keys[mkey_id])
# re-wrap all master keys in LDAP with new replica keys (as necessary)
enabled_replica_key_ids = set(localhsm.replica_pubkeys_wrap.keys())
log.debug('enabled replica key ids: %s', hex_set(enabled_replica_key_ids))
logger.debug('enabled replica key ids: %s',
hex_set(enabled_replica_key_ids))
for mkey_id, mkey_ldap in ldapkeydb.master_keys.items():
log.debug('processing master key data: 0x%s', hexlify(mkey_id))
logger.debug('processing master key data: 0x%s',
hexlify(mkey_id))
# check that all active replicas have own copy of master key
used_replica_keys = set()
@ -355,14 +367,14 @@ def master2ldap_master_keys_sync(log, ldapkeydb, localhsm):
used_replica_keys.add(matching_key['ipk11id'])
new_replica_keys = enabled_replica_key_ids - used_replica_keys
log.debug('master key 0x%s is not wrapped with replica keys %s',
hexlify(mkey_id), hex_set(new_replica_keys))
logger.debug('master key 0x%s is not wrapped with replica keys %s',
hexlify(mkey_id), hex_set(new_replica_keys))
# wrap master key with new replica keys
mkey_local = localhsm.find_keys(id=mkey_id).popitem()[1]
for replica_key_id in new_replica_keys:
log.info('adding master key 0x%s wrapped with replica key 0x%s' % (
hexlify(mkey_id), hexlify(replica_key_id)))
logger.info('adding master key 0x%s wrapped with replica key 0x%s',
hexlify(mkey_id), hexlify(replica_key_id))
replica_key = localhsm.replica_pubkeys_wrap[replica_key_id]
keydata = localhsm.p11.export_wrapped_key(mkey_local.handle,
replica_key.handle,
@ -372,18 +384,17 @@ def master2ldap_master_keys_sync(log, ldapkeydb, localhsm):
ldapkeydb.flush()
def master2ldap_zone_keys_sync(log, ldapkeydb, localhsm):
def master2ldap_zone_keys_sync(ldapkeydb, localhsm):
"""add and update zone key material from local HSM to LDAP
No key material will be removed, only new keys will be added or updated.
Key removal is hanled by master2ldap_zone_keys_purge()."""
log = log.getChild('master2ldap_zone_keys')
keypairs_ldap = ldapkeydb.zone_keypairs
log.debug("zone keys in LDAP: %s", hex_set(keypairs_ldap))
logger.debug("zone keys in LDAP: %s", hex_set(keypairs_ldap))
pubkeys_local = localhsm.zone_pubkeys
privkeys_local = localhsm.zone_privkeys
log.debug("zone keys in local HSM: %s", hex_set(privkeys_local))
logger.debug("zone keys in local HSM: %s", hex_set(privkeys_local))
assert set(pubkeys_local) == set(privkeys_local), (
"IDs of private and public keys for DNS zones in local HSM does "
@ -391,7 +402,7 @@ def master2ldap_zone_keys_sync(log, ldapkeydb, localhsm):
(hex_set(pubkeys_local), hex_set(privkeys_local)))
new_keys = set(pubkeys_local) - set(keypairs_ldap)
log.debug("new zone keys in local HSM: %s", hex_set(new_keys))
logger.debug("new zone keys in local HSM: %s", hex_set(new_keys))
mkey = localhsm.active_master_key
# wrap each new zone key pair with selected master key
for zkey_id in new_keys:
@ -405,30 +416,30 @@ def master2ldap_zone_keys_sync(log, ldapkeydb, localhsm):
ldapkeydb.import_zone_key(pubkey, pubkey_data, privkey, privkey_data,
PRIVKEY_WRAPPING_MECH, mkey['ipk11id'])
sync_set_metadata_2ldap(log, pubkeys_local, keypairs_ldap)
sync_set_metadata_2ldap(log, privkeys_local, keypairs_ldap)
sync_set_metadata_2ldap('master2ldap_zone_keys', pubkeys_local, keypairs_ldap)
sync_set_metadata_2ldap('master2ldap_zone_keys', privkeys_local, keypairs_ldap)
ldapkeydb.flush()
def master2ldap_zone_keys_purge(log, ldapkeydb, localhsm):
def master2ldap_zone_keys_purge(ldapkeydb, localhsm):
"""purge removed key material from LDAP (but not metadata)
Keys which are present in LDAP but not in local HSM will be removed.
Key metadata must be removed first so references to removed key material
are removed before actually removing the keys."""
keypairs_ldap = ldapkeydb.zone_keypairs
log.debug("zone keys in LDAP: %s", hex_set(keypairs_ldap))
logger.debug("zone keys in LDAP: %s", hex_set(keypairs_ldap))
pubkeys_local = localhsm.zone_pubkeys
privkeys_local = localhsm.zone_privkeys
log.debug("zone keys in local HSM: %s", hex_set(privkeys_local))
logger.debug("zone keys in local HSM: %s", hex_set(privkeys_local))
assert set(pubkeys_local) == set(privkeys_local), \
"IDs of private and public keys for DNS zones in local HSM does " \
"not match to key pairs: %s vs. %s" % \
(hex_set(pubkeys_local), hex_set(privkeys_local))
deleted_key_ids = set(keypairs_ldap) - set(pubkeys_local)
log.debug("zone keys deleted from local HSM but present in LDAP: %s",
hex_set(deleted_key_ids))
logger.debug("zone keys deleted from local HSM but present in LDAP: %s",
hex_set(deleted_key_ids))
for zkey_id in deleted_key_ids:
keypairs_ldap[zkey_id].schedule_deletion()
ldapkeydb.flush()
@ -439,7 +450,7 @@ def hex_set(s):
out.add("0x%s" % hexlify(i))
return out
def receive_systemd_command(log):
def receive_systemd_command():
fds = systemd.daemon.listen_fds()
if len(fds) != 1:
raise KeyError('Exactly one socket is expected.')
@ -447,16 +458,17 @@ def receive_systemd_command(log):
sck = socket.fromfd(fds[0], socket.AF_UNIX, socket.SOCK_STREAM)
rlist, _wlist, _xlist = select.select([sck], [], [], 0)
if not rlist:
log.critical('socket activation did not return socket with a command')
logger.critical('socket activation did not return socket with a '
'command')
sys.exit(0)
log.debug('accepting new connection')
logger.debug('accepting new connection')
conn, _addr = sck.accept()
log.debug('accepted new connection %s', repr(conn))
logger.debug('accepted new connection %s', repr(conn))
# this implements cmdhandler_handle_cmd() logic
cmd = conn.recv(ODS_SE_MAXLINE).strip()
log.debug('received command "%s" from systemd socket', cmd)
logger.debug('received command "%s" from systemd socket', cmd)
return (cmd, conn)
def parse_command(cmd):
@ -514,13 +526,12 @@ def cmd2ods_zone_name(cmd):
return zone_name
def sync_zone(log, ldap, dns_dn, zone_name):
def sync_zone(ldap, dns_dn, zone_name):
"""synchronize metadata about zone keys for single DNS zone
Key material has to be synchronized elsewhere.
Keep in mind that keys could be shared among multiple zones!"""
log.getChild("%s.%s" % (__name__, zone_name))
log.debug('synchronizing zone "%s"', zone_name)
logger.debug('%s: synchronizing zone "%s"', zone_name, zone_name)
ods_keys = get_ods_keys(zone_name)
ods_keys_id = set(ods_keys.keys())
@ -552,54 +563,58 @@ def sync_zone(log, ldap, dns_dn, zone_name):
ldap_keys_id = set(ldap_keys.keys())
new_keys_id = ods_keys_id - ldap_keys_id
log.info('new key metadata from ODS: %s', new_keys_id)
logger.info('%s: new key metadata from ODS: %s', zone_name, new_keys_id)
for key_id in new_keys_id:
cn = "cn=%s" % key_id
key_dn = DN(cn, keys_dn)
log.debug('adding key metadata "%s" to LDAP', key_dn)
logger.debug('%s: adding key metadata "%s" to LDAP', zone_name, key_dn)
ldap_key = ldap.make_entry(key_dn,
objectClass=['idnsSecKey'],
**ods_keys[key_id])
ldap.add_entry(ldap_key)
deleted_keys_id = ldap_keys_id - ods_keys_id
log.info('deleted key metadata in LDAP: %s', deleted_keys_id)
logger.info('%s: deleted key metadata in LDAP: %s',
zone_name, deleted_keys_id)
for key_id in deleted_keys_id:
cn = "cn=%s" % key_id
key_dn = DN(cn, keys_dn)
log.debug('deleting key metadata "%s" from LDAP', key_dn)
logger.debug('%s: deleting key metadata "%s" from LDAP',
zone_name, key_dn)
ldap.delete_entry(key_dn)
update_keys_id = ldap_keys_id.intersection(ods_keys_id)
log.info('key metadata in LDAP & ODS: %s', update_keys_id)
logger.info('%s: key metadata in LDAP & ODS: %s',
zone_name, update_keys_id)
for key_id in update_keys_id:
ldap_key = ldap_keys[key_id]
ods_key = ods_keys[key_id]
log.debug('updating key metadata "%s" in LDAP', ldap_key.dn)
logger.debug('%s: updating key metadata "%s" in LDAP',
zone_name, ldap_key.dn)
ldap_key.update(ods_key)
try:
ldap.update_entry(ldap_key)
except ipalib.errors.EmptyModlist:
continue
def cleanup_ldap_zone(log, ldap, dns_dn, zone_name):
def cleanup_ldap_zone(ldap, dns_dn, zone_name):
"""delete all key metadata about zone keys for single DNS zone
Key material has to be synchronized elsewhere.
Keep in mind that keys could be shared among multiple zones!"""
log = log.getChild("%s.%s" % (__name__, zone_name))
log.debug('cleaning up key metadata from zone "%s"', zone_name)
logger.debug('%s: cleaning up key metadata from zone "%s"',
zone_name, zone_name)
try:
ldap_zone = get_ldap_zone(ldap, dns_dn, zone_name)
ldap_keys = get_ldap_keys(ldap, ldap_zone.dn)
except ipalib.errors.NotFound as ex:
# zone or cn=keys container does not exist, we are done
log.debug(str(ex))
logger.debug('%s: %s', zone_name, str(ex))
return
for ldap_key in ldap_keys:
log.debug('deleting key metadata "%s"', ldap_key.dn)
logger.debug('%s: deleting key metadata "%s"', zone_name, ldap_key.dn)
ldap.delete_entry(ldap_key)
log = root_logger
@ -643,22 +658,23 @@ log.debug('Connected')
### DNSSEC master: key material upload & synchronization (but not deletion)
ldapkeydb = LdapKeyDB(log, ldap, DN(('cn', 'keys'), ('cn', 'sec'),
ipalib.api.env.container_dns,
ipalib.api.env.basedn))
ldapkeydb = LdapKeyDB(ldap, DN(('cn', 'keys'),
('cn', 'sec'),
ipalib.api.env.container_dns,
ipalib.api.env.basedn))
localhsm = LocalHSM(paths.LIBSOFTHSM2_SO, SOFTHSM_DNSSEC_TOKEN_LABEL,
open(paths.DNSSEC_SOFTHSM_PIN).read())
ldap2master_replica_keys_sync(log, ldapkeydb, localhsm)
master2ldap_master_keys_sync(log, ldapkeydb, localhsm)
master2ldap_zone_keys_sync(log, ldapkeydb, localhsm)
ldap2master_replica_keys_sync(ldapkeydb, localhsm)
master2ldap_master_keys_sync(ldapkeydb, localhsm)
master2ldap_zone_keys_sync(ldapkeydb, localhsm)
### DNSSEC master: DNSSEC key metadata upload & synchronization & deletion
# command receive is delayed so the command will stay in socket queue until
# the problem with LDAP server or HSM is fixed
try:
cmd, conn = receive_systemd_command(log)
cmd, conn = receive_systemd_command()
if len(sys.argv) != 1:
log.critical('No additional parameters are accepted when '
'socket activation is used.')
@ -703,18 +719,18 @@ try:
if zone_name is not None:
# only one zone should be processed
if cmd == 'update':
sync_zone(log, ldap, dns_dn, zone_name)
sync_zone(ldap, dns_dn, zone_name)
elif cmd == 'ldap-cleanup':
cleanup_ldap_zone(log, ldap, dns_dn, zone_name)
cleanup_ldap_zone(ldap, dns_dn, zone_name)
else:
# process all zones
for zone_row in db.execute("SELECT name FROM zones"):
sync_zone(log, ldap, dns_dn, zone_row['name'])
sync_zone(ldap, dns_dn, zone_row['name'])
### DNSSEC master: DNSSEC key material purging
# references to old key material were removed above in sync_zone()
# so now we can purge old key material from LDAP
master2ldap_zone_keys_purge(log, ldapkeydb, localhsm)
master2ldap_zone_keys_purge(ldapkeydb, localhsm)
except Exception as ex:
msg = "ipa-ods-exporter exception: %s" % traceback.format_exc(ex)

View File

@ -751,15 +751,15 @@ def run(api):
sys.exit(api.Backend.cli.run(argv))
except KeyboardInterrupt:
print('')
api.log.info('operation aborted')
logger.info('operation aborted')
except PublicError as e:
error = e
except Exception as e:
api.log.exception('%s: %s', e.__class__.__name__, str(e))
logger.exception('%s: %s', e.__class__.__name__, str(e))
error = InternalError()
if error is not None:
assert isinstance(error, PublicError)
api.log.error(error.strerror)
logger.error(error.strerror)
sys.exit(error.rval)
#+END_SRC

View File

@ -1,17 +1,23 @@
import logging
import os
from ipaplatform.paths import paths
from ipalib import api
logger = logging.getLogger(os.path.basename(__file__))
api.bootstrap(context='server', confdir=paths.ETC_IPA, log=None) (ref:wsgi-app-bootstrap)
try:
api.finalize() (ref:wsgi-app-finalize)
except Exception as e:
api.log.error('Failed to start IPA: %s' % e)
logger.error('Failed to start IPA: %s', e)
else:
api.log.info('*** PROCESS START ***')
logger.info('*** PROCESS START ***')
# This is the WSGI callable:
def application(environ, start_response): (ref:wsgi-app-start)
if not environ['wsgi.multithread']:
return api.Backend.session(environ, start_response)
else:
api.log.error("IPA does not work with the threaded MPM, use the pre-fork MPM") (ref:wsgi-app-end)
logger.error("IPA does not work with the threaded MPM, "
"use the pre-fork MPM") (ref:wsgi-app-end)

View File

@ -23,20 +23,26 @@
"""
WSGI appliction for IPA server.
"""
import logging
import os
from ipaplatform.paths import paths
from ipalib import api
logger = logging.getLogger(os.path.basename(__file__))
api.bootstrap(context='server', confdir=paths.ETC_IPA, log=None)
try:
api.finalize()
except Exception as e:
api.log.error('Failed to start IPA: %s' % e)
logger.error('Failed to start IPA: %s', e)
else:
api.log.info('*** PROCESS START ***')
logger.info('*** PROCESS START ***')
# This is the WSGI callable:
def application(environ, start_response):
if not environ['wsgi.multithread']:
return api.Backend.wsgi_dispatch(environ, start_response)
else:
api.log.error("IPA does not work with the threaded MPM, use the pre-fork MPM")
logger.error("IPA does not work with the threaded MPM, "
"use the pre-fork MPM")

View File

@ -23,6 +23,7 @@
This script creates or removes the symlink from /etc/ipa/ipa-kdc-proxy.conf
to /etc/httpd/conf.d/. It's called from ExecStartPre hook in httpd.service.
"""
import logging
import os
import socket
import sys
@ -33,6 +34,8 @@ from ipapython.ipaldap import LDAPClient
from ipapython.dn import DN
from ipaplatform.paths import paths
logger = logging.getLogger(os.path.basename(__file__))
DEBUG = False
TIME_LIMIT = 2
@ -66,7 +69,6 @@ class KDCProxyConfig(object):
def __init__(self, time_limit=TIME_LIMIT):
self.time_limit = time_limit
self.con = None
self.log = api.log
self.ldap_uri = api.env.ldap_uri
self.kdc_dn = DN(('cn', 'KDC'), ('cn', api.env.host),
('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'),
@ -76,7 +78,7 @@ class KDCProxyConfig(object):
def _ldap_con(self):
"""Establish LDAP connection"""
self.log.debug('ldap_uri: %s', self.ldap_uri)
logger.debug('ldap_uri: %s', self.ldap_uri)
try:
self.con = LDAPClient(self.ldap_uri)
self.con.external_bind()
@ -89,7 +91,7 @@ class KDCProxyConfig(object):
except Exception as e:
msg = ('Unknown error while retrieving setting from %s: %s' %
(self.ldap_uri, e))
self.log.exception(msg)
logger.exception('%s', msg)
raise FatalError(msg)
def _find_entry(self, dn, attrs, filter, scope=LDAPClient.SCOPE_BASE):
@ -98,23 +100,23 @@ class KDCProxyConfig(object):
entries = self.con.get_entries(
dn, scope, filter, attrs, time_limit=self.time_limit)
except errors.NotFound:
self.log.debug('Entry not found: %s', dn)
logger.debug('Entry not found: %s', dn)
return None
except Exception as e:
msg = ('Unknown error while retrieving setting from %s: %s' %
(self.ldap_uri, e))
self.log.exception(msg)
logger.exception('%s', msg)
raise FatalError(msg)
return entries[0]
def is_host_enabled(self):
"""Check replica specific flag"""
self.log.debug('Read settings from dn: %s', self.kdc_dn)
logger.debug('Read settings from dn: %s', self.kdc_dn)
srcfilter = self.con.make_filter(
{'ipaConfigString': u'kdcProxyEnabled'}
)
entry = self._find_entry(self.kdc_dn, ['cn'], srcfilter)
self.log.debug('%s ipaConfigString: %s', self.kdc_dn, entry)
logger.debug('%s ipaConfigString: %s', self.kdc_dn, entry)
return entry is not None
def validate_symlink(self):
@ -137,20 +139,20 @@ class KDCProxyConfig(object):
try:
valid = self.validate_symlink()
except ConfigFileError as e:
self.log.warning("Cannot enable KDC proxy: %s " % e)
logger.warning("Cannot enable KDC proxy: %s ", e)
return False
if valid:
self.log.debug("Symlink exists and is valid")
logger.debug("Symlink exists and is valid")
return True
if not os.path.isfile(self.conf):
self.log.warning("'%s' does not exist", self.conf)
logger.warning("'%s' does not exist", self.conf)
return False
# create the symbolic link
self.log.debug("Creating symlink from '%s' to '%s'",
self.conf, self.conflink)
logger.debug("Creating symlink from '%s' to '%s'",
self.conf, self.conflink)
os.symlink(self.conf, self.conflink)
return True
@ -159,15 +161,15 @@ class KDCProxyConfig(object):
try:
valid = self.validate_symlink()
except CheckError as e:
self.log.warning("Cannot disable KDC proxy: %s " % e)
logger.warning("Cannot disable KDC proxy: %s ", e)
return False
if valid:
self.log.debug("Removing symlink '%s'", self.conflink)
logger.debug("Removing symlink '%s'", self.conflink)
os.unlink(self.conflink)
else:
self.log.debug("Symlink '%s' has already been removed.",
self.conflink)
logger.debug("Symlink '%s' has already been removed.",
self.conflink)
return True
@ -193,19 +195,19 @@ def main(debug=DEBUG, time_limit=TIME_LIMIT):
with cfg:
if cfg.is_host_enabled():
if cfg.create_symlink():
api.log.info('KDC proxy enabled')
logger.info('KDC proxy enabled')
return 0
else:
if cfg.remove_symlink():
api.log.info('KDC proxy disabled')
logger.info('KDC proxy disabled')
return 0
except CheckError as e:
api.log.warning(str(e))
api.log.warning('Disabling KDC proxy')
logger.warning('%s', str(e))
logger.warning('Disabling KDC proxy')
cfg.remove_symlink()
return 0
except Exception as e:
api.log.error(str(e))
logger.error('%s', str(e))
return 1

View File

@ -23,6 +23,7 @@ Functionality for Command Line Interface.
from __future__ import print_function
import importlib
import logging
import textwrap
import sys
import getpass
@ -58,6 +59,8 @@ from ipapython.dnsutil import DNSName
import datetime
logger = logging.getLogger(__name__)
def to_cli(name):
"""
@ -1351,13 +1354,13 @@ def run(api):
sys.exit(api.Backend.cli.run(argv))
except KeyboardInterrupt:
print('')
api.log.info('operation aborted')
logger.info('operation aborted')
except PublicError as e:
error = e
except Exception as e:
api.log.exception('%s: %s', e.__class__.__name__, str(e))
logger.exception('%s: %s', e.__class__.__name__, str(e))
error = InternalError()
if error is not None:
assert isinstance(error, PublicError)
api.log.error(error.strerror)
logger.error(error.strerror)
sys.exit(error.rval)

View File

@ -54,6 +54,8 @@ from ipapython.version import VERSION, API_VERSION, DEFAULT_PLUGINS
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
# FIXME: Updated constants.TYPE_ERROR to use this clearer format from wehjit:
TYPE_ERROR = '%s: need a %r; got a %r: %r'
@ -430,9 +432,7 @@ class API(ReadOnly):
Initialize environment variables and logging.
"""
self.__doing('bootstrap')
self.log_mgr = log_mgr
log = log_mgr.root_logger
self.log = log
self.log = log_mgr.get_logger(self)
self.env._bootstrap(**overrides)
self.env._finalize_core(**dict(DEFAULT_CONFIG))
@ -509,7 +509,7 @@ class API(ReadOnly):
try:
os.makedirs(log_dir)
except OSError:
log.error('Could not create log_dir %r', log_dir)
logger.error('Could not create log_dir %r', log_dir)
return
level = logging.INFO
@ -518,7 +518,7 @@ class API(ReadOnly):
try:
handler = logging.FileHandler(self.env.log)
except IOError as e:
log.error('Cannot open log file %r: %s', self.env.log, e)
logger.error('Cannot open log file %r: %s', self.env.log, e)
return
handler.setLevel(level)
handler.setFormatter(ipa_log_manager.Formatter(LOGGING_FORMAT_FILE))
@ -645,28 +645,26 @@ class API(ReadOnly):
name=package_name, file=package_file
)
self.log.debug("importing all plugin modules in %s...", package_name)
logger.debug("importing all plugin modules in %s...", package_name)
modules = getattr(package, 'modules', find_modules_in_dir(package_dir))
modules = ['.'.join((package_name, name)) for name in modules]
for name in modules:
self.log.debug("importing plugin module %s", name)
logger.debug("importing plugin module %s", name)
try:
module = importlib.import_module(name)
except errors.SkipPluginModule as e:
self.log.debug("skipping plugin module %s: %s", name, e.reason)
logger.debug("skipping plugin module %s: %s", name, e.reason)
continue
except Exception as e:
if self.env.startup_traceback:
import traceback
self.log.error("could not load plugin module %s\n%s", name,
traceback.format_exc())
logger.exception("could not load plugin module %s", name)
raise
try:
self.add_module(module)
except errors.PluginModuleError as e:
self.log.debug("%s", e)
logger.debug("%s", e)
def add_module(self, module):
"""
@ -750,7 +748,7 @@ class API(ReadOnly):
if self.env.env_confdir is not None:
if self.env.env_confdir == self.env.confdir:
self.log.info(
logger.info(
"IPA_CONFDIR env sets confdir to '%s'.", self.env.confdir)
for plugin in self.__plugins:

View File

@ -693,7 +693,7 @@ class DNSSECValidationError(ForwarderValidationError):
"failed DNSSEC validation on server %(ip)s")
def _log_response(log, e):
def _log_response(e):
"""
If exception contains response from server, log this response to debug log
:param log: if log is None, do not log
@ -702,7 +702,7 @@ def _log_response(log, e):
assert isinstance(e, DNSException)
response = getattr(e, 'kwargs', {}).get('response')
if response:
log.debug("DNSException: %s; server response: %s", e, response)
logger.debug("DNSException: %s; server response: %s", e, response)
def _resolve_record(owner, rtype, nameserver_ip=None, edns0=False,
@ -738,7 +738,7 @@ def _resolve_record(owner, rtype, nameserver_ip=None, edns0=False,
return res.query(owner, rtype)
def _validate_edns0_forwarder(owner, rtype, ip_addr, log=logger, timeout=10):
def _validate_edns0_forwarder(owner, rtype, ip_addr, timeout=10):
"""
Validate if forwarder supports EDNS0
@ -749,7 +749,7 @@ def _validate_edns0_forwarder(owner, rtype, ip_addr, log=logger, timeout=10):
try:
_resolve_record(owner, rtype, nameserver_ip=ip_addr, timeout=timeout)
except DNSException as e:
_log_response(log, e)
_log_response(e)
raise UnresolvableRecordError(owner=owner, rtype=rtype, ip=ip_addr,
error=e)
@ -757,12 +757,12 @@ def _validate_edns0_forwarder(owner, rtype, ip_addr, log=logger, timeout=10):
_resolve_record(owner, rtype, nameserver_ip=ip_addr, edns0=True,
timeout=timeout)
except DNSException as e:
_log_response(log, e)
_log_response(e)
raise EDNS0UnsupportedError(owner=owner, rtype=rtype, ip=ip_addr,
error=e)
def validate_dnssec_global_forwarder(ip_addr, log=logger, timeout=10):
def validate_dnssec_global_forwarder(ip_addr, timeout=10):
"""Test DNS forwarder properties. against root zone.
Global forwarders should be able return signed root zone
@ -776,14 +776,14 @@ def validate_dnssec_global_forwarder(ip_addr, log=logger, timeout=10):
owner = "."
rtype = "SOA"
_validate_edns0_forwarder(owner, rtype, ip_addr, log=log, timeout=timeout)
_validate_edns0_forwarder(owner, rtype, ip_addr, timeout=timeout)
# DNS root has to be signed
try:
ans = _resolve_record(owner, rtype, nameserver_ip=ip_addr, dnssec=True,
timeout=timeout)
except DNSException as e:
_log_response(log, e)
_log_response(e)
raise DNSSECSignatureMissingError(owner=owner, rtype=rtype, ip=ip_addr)
try:
@ -814,7 +814,6 @@ def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, timeout=10):
:raise UnresolvableRecordError: record cannot be resolved
:raise DNSSECValidationError: response from forwarder is not DNSSEC valid
"""
log = logger
rtype = "SOA"
try:
ans_cd = _resolve_record(fwzone, rtype, nameserver_ip=ipa_ip_addr,
@ -822,10 +821,10 @@ def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, timeout=10):
timeout=timeout)
except NXDOMAIN as e:
# sometimes CD flag is ignored and NXDomain is returned
_log_response(log, e)
_log_response(e)
raise DNSSECValidationError(owner=fwzone, rtype=rtype, ip=ipa_ip_addr)
except DNSException as e:
_log_response(log, e)
_log_response(e)
raise UnresolvableRecordError(owner=fwzone, rtype=rtype,
ip=ipa_ip_addr, error=e)
@ -833,7 +832,7 @@ def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, timeout=10):
ans_do = _resolve_record(fwzone, rtype, nameserver_ip=ipa_ip_addr,
edns0=True, dnssec=True, timeout=timeout)
except DNSException as e:
_log_response(log, e)
_log_response(e)
raise DNSSECValidationError(owner=fwzone, rtype=rtype, ip=ipa_ip_addr)
else:
if (ans_do.canonical_name == ans_cd.canonical_name

View File

@ -6,6 +6,7 @@
Common stuff.
"""
import logging
import traceback
from . import core
@ -14,6 +15,8 @@ from .util import from_
__all__ = ['step', 'Installable', 'Interactive', 'Continuous', 'installer',
'uninstaller']
logger = logging.getLogger(__name__)
def step():
def decorator(func):
@ -90,9 +93,9 @@ class Continuous(core.Configurable):
try:
super(Continuous, self)._handle_execute_exception(exc_info)
except BaseException as e:
self.log.debug(traceback.format_exc())
logger.debug("%s", traceback.format_exc())
if isinstance(e, Exception):
self.log.error("%s", e)
logger.error("%s", e)
def installer(cls):

View File

@ -14,8 +14,6 @@ import sys
import six
from ipapython.ipa_log_manager import root_logger
from . import util
from .util import from_
@ -284,8 +282,6 @@ class Configurable(six.with_metaclass(abc.ABCMeta, object)):
Initialize the configurable.
"""
self.log = root_logger
cls = self.__class__
for owner_cls, name in cls.properties():
if name.startswith('_'):

View File

@ -22,6 +22,7 @@
# Make sure we only run this module at the server where samba4-python
# package is installed to avoid issues with unavailable modules
import logging
import re
import time
@ -76,6 +77,8 @@ The code in this module relies heavily on samba4-python package
and Samba4 python bindings.
""")
logger = logging.getLogger(__name__)
# Both constants can be used as masks against trust direction
# because bi-directional has two lower bits set.
TRUST_ONEWAY = 1
@ -257,8 +260,8 @@ class DomainValidator(object):
except KeyError as exc:
# Some piece of trusted domain info in LDAP is missing
# Skip the domain, but leave log entry for investigation
api.log.warning("Trusted domain '%s' entry misses an "
"attribute: %s", e.dn, exc)
logger.warning("Trusted domain '%s' entry misses an "
"attribute: %s", e.dn, exc)
continue
result[t_partner] = (fname_norm,

View File

@ -2,8 +2,12 @@
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import logging
from ipaserver import p11helper as _ipap11helper
logger = logging.getLogger(__name__)
attrs_id2name = {
#_ipap11helper.CKA_ALLOWED_MECHANISMS: 'ipk11allowedmechanisms',
_ipap11helper.CKA_ALWAYS_AUTHENTICATE: 'ipk11alwaysauthenticate',
@ -106,7 +110,8 @@ modifiable_attrs_id2name = {
modifiable_attrs_name2id = {v: k for k, v in modifiable_attrs_id2name.items()}
def sync_pkcs11_metadata(log, source, target):
def sync_pkcs11_metadata(name, source, target):
"""sync ipk11 metadata from source object to target object"""
# iterate over list of modifiable PKCS#11 attributes - this prevents us
@ -114,7 +119,11 @@ def sync_pkcs11_metadata(log, source, target):
for attr in modifiable_attrs_name2id:
if attr in source:
if source[attr] != target[attr]:
log.debug('Updating attribute %s from "%s" to "%s"', attr, repr(source[attr]), repr(target[attr]))
logger.debug('%s: Updating attribute %s from "%s" to "%s"',
name,
attr,
repr(source[attr]),
repr(target[attr]))
target[attr] = source[attr]
def populate_pkcs11_metadata(source, target):

View File

@ -123,7 +123,6 @@ class Key(collections.MutableMapping):
self._delentry = None # indicates that object was deleted
self.ldap = ldap
self.ldapkeydb = ldapkeydb
self.log = logger.getChild(__name__)
def __assert_not_deleted(self):
assert self.entry and not self._delentry, (
@ -197,9 +196,9 @@ class Key(collections.MutableMapping):
assert not self.entry, (
"Key._delete_key() called before Key.schedule_deletion()")
assert self._delentry, "Key._delete_key() called more than once"
self.log.debug('deleting key id 0x%s DN %s from LDAP',
hexlify(self._delentry.single_value['ipk11id']),
self._delentry.dn)
logger.debug('deleting key id 0x%s DN %s from LDAP',
hexlify(self._delentry.single_value['ipk11id']),
self._delentry.dn)
self.ldap.delete_entry(self._delentry)
self._delentry = None
self.ldap = None
@ -259,10 +258,11 @@ class MasterKey(Key):
ipaWrappingKey=wrapping_key_uri,
ipaWrappingMech=wrapping_mech)
self.log.info('adding master key 0x%s wrapped with replica key 0x%s to %s',
hexlify(self['ipk11id']),
hexlify(replica_key_id),
entry_dn)
logger.info('adding master key 0x%s wrapped with replica key 0x%s to '
'%s',
hexlify(self['ipk11id']),
hexlify(replica_key_id),
entry_dn)
self.ldap.add_entry(entry)
if 'ipaSecretKeyRef' not in self.entry:
self.entry['objectClass'] += ['ipaSecretKeyRefObject']
@ -270,10 +270,9 @@ class MasterKey(Key):
class LdapKeyDB(AbstractHSM):
def __init__(self, log, ldap, base_dn):
def __init__(self, ldap, base_dn):
self.ldap = ldap
self.base_dn = base_dn
self.log = log
self.cache_replica_pubkeys_wrap = None
self.cache_masterkeys = None
self.cache_zone_keypairs = None
@ -348,7 +347,7 @@ class LdapKeyDB(AbstractHSM):
new_key = self._import_keys_metadata(
[(mkey, _ipap11helper.KEY_CLASS_SECRET_KEY)])
self.ldap.add_entry(new_key.entry)
self.log.debug('imported master key metadata: %s', new_key.entry)
logger.debug('imported master key metadata: %s', new_key.entry)
def import_zone_key(self, pubkey, pubkey_data, privkey,
privkey_wrapped_data, wrapping_mech, master_key_id):
@ -366,7 +365,7 @@ class LdapKeyDB(AbstractHSM):
new_key.entry['ipaPublicKey'] = pubkey_data
self.ldap.add_entry(new_key.entry)
self.log.debug('imported zone key id: 0x%s', hexlify(new_key['ipk11id']))
logger.debug('imported zone key id: 0x%s', hexlify(new_key['ipk11id']))
@property
def replica_pubkeys_wrap(self):
@ -431,9 +430,10 @@ if __name__ == '__main__':
ldap.gssapi_bind()
log.debug('Connected')
ldapkeydb = LdapKeyDB(log, ldap, DN(('cn', 'keys'), ('cn', 'sec'),
ipalib.api.env.container_dns,
ipalib.api.env.basedn))
ldapkeydb = LdapKeyDB(ldap, DN(('cn', 'keys'),
('cn', 'sec'),
ipalib.api.env.container_dns,
ipalib.api.env.basedn))
print('replica public keys: CKA_WRAP = TRUE')
print('====================================')

View File

@ -7,7 +7,6 @@ from __future__ import print_function
from binascii import hexlify
import collections
import logging
import os
from pprint import pprint
@ -94,7 +93,6 @@ class LocalHSM(AbstractHSM):
def __init__(self, library, label, pin):
self.cache_replica_pubkeys = None
self.p11 = _ipap11helper.P11_Helper(label, pin, library)
self.log = logging.getLogger()
def __del__(self):
self.p11.finalize()

View File

@ -503,7 +503,7 @@ def check_forwarders(dns_forwarders, logger):
for forwarder in dns_forwarders:
logger.debug("Checking DNS server: %s", forwarder)
try:
validate_dnssec_global_forwarder(forwarder, log=logger)
validate_dnssec_global_forwarder(forwarder)
except DNSSECSignatureMissingError as e:
forwarders_dnssec_valid = False
logger.warning("DNS server %s does not support DNSSEC: %s",

View File

@ -4,6 +4,7 @@
from __future__ import print_function
import logging
import os
import pwd
import grp
@ -25,6 +26,8 @@ from ipalib import errors, api
from ipalib.constants import SOFTHSM_DNSSEC_TOKEN_LABEL
from ipaserver.install.bindinstance import dns_container_exists
logger = logging.getLogger(__name__)
replica_keylabel_template = u"dnssec-replica:%s"
@ -51,7 +54,6 @@ class DNSKeySyncInstance(service.Service):
service_prefix=u'ipa-dnskeysyncd',
keytab=paths.IPA_DNSKEYSYNCD_KEYTAB
)
self.logger = logger
self.extra_config = [u'dnssecVersion 1', ] # DNSSEC enabled
self.named_uid = None
self.named_gid = None
@ -156,7 +158,7 @@ class DNSKeySyncInstance(service.Service):
"""
if dnssec_container_exists(self.suffix):
self.logger.info("DNSSEC container exists (step skipped)")
logger.info("DNSSEC container exists (step skipped)")
return
self._ldap_mod("dnssec.ldif", {'SUFFIX': self.suffix, })
@ -169,7 +171,7 @@ class DNSKeySyncInstance(service.Service):
# create dnssec directory
if not os.path.exists(paths.IPA_DNSSEC_DIR):
self.logger.debug("Creating %s directory", paths.IPA_DNSSEC_DIR)
logger.debug("Creating %s directory", paths.IPA_DNSSEC_DIR)
os.mkdir(paths.IPA_DNSSEC_DIR)
os.chmod(paths.IPA_DNSSEC_DIR, 0o770)
# chown ods:named
@ -182,7 +184,7 @@ class DNSKeySyncInstance(service.Service):
"objectstore.backend = file") % {
'tokens_dir': paths.DNSSEC_TOKENS_DIR
}
self.logger.debug("Creating new softhsm config file")
logger.debug("Creating new softhsm config file")
named_fd = open(paths.DNSSEC_SOFTHSM2_CONF, 'w')
named_fd.seek(0)
named_fd.truncate(0)
@ -208,13 +210,12 @@ class DNSKeySyncInstance(service.Service):
# remove old tokens
if token_dir_exists:
self.logger.debug('Removing old tokens directory %s',
paths.DNSSEC_TOKENS_DIR)
logger.debug('Removing old tokens directory %s',
paths.DNSSEC_TOKENS_DIR)
shutil.rmtree(paths.DNSSEC_TOKENS_DIR)
# create tokens subdirectory
self.logger.debug('Creating tokens %s directory',
paths.DNSSEC_TOKENS_DIR)
logger.debug('Creating tokens %s directory', paths.DNSSEC_TOKENS_DIR)
# sticky bit is required by daemon
os.mkdir(paths.DNSSEC_TOKENS_DIR)
os.chmod(paths.DNSSEC_TOKENS_DIR, 0o770 | stat.S_ISGID)
@ -228,7 +229,7 @@ class DNSKeySyncInstance(service.Service):
pin_so = ipautil.ipa_generate_password(
entropy_bits=0, special=None, min_len=pin_length)
self.logger.debug("Saving user PIN to %s", paths.DNSSEC_SOFTHSM_PIN)
logger.debug("Saving user PIN to %s", paths.DNSSEC_SOFTHSM_PIN)
named_fd = open(paths.DNSSEC_SOFTHSM_PIN, 'w')
named_fd.seek(0)
named_fd.truncate(0)
@ -238,7 +239,7 @@ class DNSKeySyncInstance(service.Service):
# chown to ods:named
os.chown(paths.DNSSEC_SOFTHSM_PIN, self.ods_uid, self.named_gid)
self.logger.debug("Saving SO PIN to %s", paths.DNSSEC_SOFTHSM_PIN_SO)
logger.debug("Saving SO PIN to %s", paths.DNSSEC_SOFTHSM_PIN_SO)
named_fd = open(paths.DNSSEC_SOFTHSM_PIN_SO, 'w')
named_fd.seek(0)
named_fd.truncate(0)
@ -257,7 +258,7 @@ class DNSKeySyncInstance(service.Service):
'--pin', pin,
'--so-pin', pin_so,
]
self.logger.debug("Initializing tokens")
logger.debug("Initializing tokens")
os.environ["SOFTHSM2_CONF"] = paths.DNSSEC_SOFTHSM2_CONF
ipautil.run(command, nolog=(pin, pin_so,))
@ -277,7 +278,7 @@ class DNSKeySyncInstance(service.Service):
try:
# generate replica keypair
self.logger.debug("Creating replica's key pair")
logger.debug("Creating replica's key pair")
key_id = None
while True:
# check if key with this ID exist in softHSM
@ -325,14 +326,14 @@ class DNSKeySyncInstance(service.Service):
'ipk11VerifyRecover': [False],
}
self.logger.debug("Storing replica public key to LDAP, %s",
replica_pubkey_dn)
logger.debug("Storing replica public key to LDAP, %s",
replica_pubkey_dn)
entry = ldap.make_entry(replica_pubkey_dn, **kw)
ldap.add_entry(entry)
self.logger.debug("Replica public key stored")
logger.debug("Replica public key stored")
self.logger.debug("Setting CKA_WRAP=False for old replica keys")
logger.debug("Setting CKA_WRAP=False for old replica keys")
# first create new keys, we don't want disable keys before, we
# have new keys in softhsm and LDAP
@ -366,7 +367,7 @@ class DNSKeySyncInstance(service.Service):
p11.finalize()
# change tokens mod/owner
self.logger.debug("Changing ownership of token files")
logger.debug("Changing ownership of token files")
for (root, dirs, files) in os.walk(paths.DNSSEC_TOKENS_DIR):
for directory in dirs:
dir_path = os.path.join(root, directory)
@ -384,7 +385,7 @@ class DNSKeySyncInstance(service.Service):
self.ldap_enable('DNSKeySync', self.fqdn, None,
self.suffix, self.extra_config)
except errors.DuplicateEntry:
self.logger.error("DNSKeySync service already exists")
logger.error("DNSKeySync service already exists")
def __setup_principal(self):
assert self.ods_gid is not None
@ -415,8 +416,8 @@ class DNSKeySyncInstance(service.Service):
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as e:
self.logger.critical("Could not modify principal's %s entry: %s"
% (dnssynckey_principal_dn, str(e)))
logger.critical("Could not modify principal's %s entry: %s",
dnssynckey_principal_dn, str(e))
raise
# bind-dyndb-ldap persistent search feature requires both size and time
@ -429,8 +430,8 @@ class DNSKeySyncInstance(service.Service):
try:
api.Backend.ldap2.modify_s(dnssynckey_principal_dn, mod)
except Exception as e:
self.logger.critical("Could not set principal's %s LDAP limits: %s"
% (dnssynckey_principal_dn, str(e)))
logger.critical("Could not set principal's %s LDAP limits: %s",
dnssynckey_principal_dn, str(e))
raise
def __start(self):
@ -438,7 +439,7 @@ class DNSKeySyncInstance(service.Service):
self.restart()
except Exception as e:
print("Failed to start ipa-dnskeysyncd")
self.logger.debug("Failed to start ipa-dnskeysyncd: %s", e)
logger.debug("Failed to start ipa-dnskeysyncd: %s", e)
def uninstall(self):
@ -458,7 +459,7 @@ class DNSKeySyncInstance(service.Service):
try:
self.fstore.restore_file(f)
except ValueError as error:
self.logger.debug(error)
logger.debug('%s', error)
# remove softhsm pin, to make sure new installation will generate
# new token database

View File

@ -109,7 +109,6 @@ class RemoveRUVParser(ldif.LDIFParser):
def __init__(self, input_file, writer):
ldif.LDIFParser.__init__(self, input_file)
self.writer = writer
self.log = logger
def handle(self, dn, entry):
objectclass = None
@ -125,7 +124,7 @@ class RemoveRUVParser(ldif.LDIFParser):
if (objectclass and nsuniqueid and
'nstombstone' in objectclass and
'ffffffff-ffffffff-ffffffff-ffffffff' in nsuniqueid):
self.log.debug("Removing RUV entry %s", dn)
logger.debug("Removing RUV entry %s", dn)
return
self.writer.unparse(dn, entry)

View File

@ -414,16 +414,15 @@ class _sssd(object):
:raise RemoteRetrieveError: if DBus error occurs
"""
try:
self.log = logger
self._bus = dbus.SystemBus()
self._users_obj = self._bus.get_object(
DBUS_SSSD_NAME, DBUS_SSSD_USERS_PATH)
self._users_iface = dbus.Interface(
self._users_obj, DBUS_SSSD_USERS_IF)
except dbus.DBusException as e:
self.log.error(
'Failed to initialize DBus interface {iface}. DBus '
'exception is {exc}.'.format(iface=DBUS_SSSD_USERS_IF, exc=e)
logger.error(
'Failed to initialize DBus interface %s. DBus '
'exception is %s.', DBUS_SSSD_USERS_IF, e
)
raise errors.RemoteRetrieveError(
reason=_('Failed to connect to sssd over SystemBus. '
@ -469,9 +468,9 @@ class _sssd(object):
# exception and return an empty list
if err_name == 'org.freedesktop.sssd.Error.NotFound':
return dict()
self.log.error(
'Failed to use interface {iface}. DBus '
'exception is {exc}.'.format(iface=DBUS_SSSD_USERS_IF, exc=e))
logger.error(
'Failed to use interface %s. DBus '
'exception is %s.', DBUS_SSSD_USERS_IF, e)
raise errors.RemoteRetrieveError(
reason=_('Failed to find users over SystemBus. '
' See details in the error_log'))

View File

@ -145,7 +145,7 @@ def remove_ptr_rec(ipaddr, fqdn):
Remove PTR record of IP address (ipaddr)
:return: True if PTR record was removed, False if record was not found
"""
api.log.debug('deleting PTR record of ipaddr %s', ipaddr)
logger.debug('deleting PTR record of ipaddr %s', ipaddr)
try:
revzone, revname = get_reverse_zone(ipaddr)
@ -155,7 +155,7 @@ def remove_ptr_rec(ipaddr, fqdn):
api.Command['dnsrecord_del'](revzone, revname, **delkw)
except (errors.NotFound, errors.AttrValueNotFound):
api.log.debug('PTR record of ipaddr %s not found', ipaddr)
logger.debug('PTR record of ipaddr %s not found', ipaddr)
return False
return True

View File

@ -193,8 +193,9 @@ def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs
# See if the gidNumber at least points to a valid group on the remote
# server.
if entry_attrs['gidnumber'][0] in invalid_gids:
api.log.warning('GID number %s of migrated user %s does not point to a known group.' \
% (entry_attrs['gidnumber'][0], pkey))
logger.warning('GID number %s of migrated user %s does not point '
'to a known group.',
entry_attrs['gidnumber'][0], pkey)
elif entry_attrs['gidnumber'][0] not in valid_gids:
try:
remote_entry = ds_ldap.find_entry_by_attr(
@ -203,15 +204,18 @@ def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs
)
valid_gids.add(entry_attrs['gidnumber'][0])
except errors.NotFound:
api.log.warning('GID number %s of migrated user %s does not point to a known group.' \
% (entry_attrs['gidnumber'][0], pkey))
logger.warning('GID number %s of migrated user %s does not '
'point to a known group.',
entry_attrs['gidnumber'][0], pkey)
invalid_gids.add(entry_attrs['gidnumber'][0])
except errors.SingleMatchExpected as e:
# GID number matched more groups, this should not happen
api.log.warning('GID number %s of migrated user %s should match 1 group, but it matched %d groups' \
% (entry_attrs['gidnumber'][0], pkey, e.found))
logger.warning('GID number %s of migrated user %s should '
'match 1 group, but it matched %d groups',
entry_attrs['gidnumber'][0], pkey, e.found)
except errors.LimitsExceeded as e:
api.log.warning('Search limit exceeded searching for GID %s' % entry_attrs['gidnumber'][0])
logger.warning('Search limit exceeded searching for GID %s',
entry_attrs['gidnumber'][0])
# We don't want to create a UPG so set the magic value in description
# to let the DS plugin know.
@ -254,19 +258,22 @@ def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs
# value is not DN instance, the automatic encoding may have
# failed due to missing schema or the remote attribute type OID was
# not detected as DN type. Try to work this around
api.log.debug('%s: value %s of type %s in attribute %s is not a DN'
', convert it', pkey, value, type(value), attr)
logger.debug('%s: value %s of type %s in attribute %s is '
'not a DN, convert it',
pkey, value, type(value), attr)
try:
value = DN(value)
except ValueError as e:
api.log.warning('%s: skipping normalization of value %s of type %s '
'in attribute %s which could not be converted to DN: %s',
pkey, value, type(value), attr, e)
logger.warning('%s: skipping normalization of value '
'%s of type %s in attribute %s which '
'could not be converted to DN: %s',
pkey, value, type(value), attr, e)
continue
try:
remote_entry = ds_ldap.get_entry(value, [api.Object.user.primary_key.name, api.Object.group.primary_key.name])
except errors.NotFound:
api.log.warning('%s: attribute %s refers to non-existent entry %s' % (pkey, attr, value))
logger.warning('%s: attribute %s refers to non-existent '
'entry %s', pkey, attr, value)
continue
if value.endswith(search_bases['user']):
primary_key = api.Object.user.primary_key.name
@ -275,14 +282,18 @@ def _pre_migrate_user(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwargs
primary_key = api.Object.group.primary_key.name
container = api.env.container_group
else:
api.log.warning('%s: value %s in attribute %s does not belong into any known container' % (pkey, value, attr))
logger.warning('%s: value %s in attribute %s does not '
'belong into any known container',
pkey, value, attr)
continue
if not remote_entry.get(primary_key):
api.log.warning('%s: there is no primary key %s to migrate for %s' % (pkey, primary_key, attr))
logger.warning('%s: there is no primary key %s to migrate '
'for %s', pkey, primary_key, attr)
continue
api.log.debug('converting DN value %s for %s in %s' % (value, attr, dn))
logger.debug('converting DN value %s for %s in %s',
value, attr, dn)
rdnval = remote_entry[primary_key][0].lower()
entry_attrs[attr][ind] = DN((primary_key, rdnval), container, api.env.basedn)
@ -318,7 +329,7 @@ def _update_default_group(ldap, ctx, force):
searchfilter, [''], DN(api.env.container_user, api.env.basedn),
scope=ldap.SCOPE_SUBTREE, time_limit=-1, size_limit=-1)
except errors.NotFound:
api.log.debug('All users have default group set')
logger.debug('All users have default group set')
return
member_dns = [m.dn for m in result]
@ -327,14 +338,14 @@ def _update_default_group(ldap, ctx, force):
with ldap.error_handler():
ldap.conn.modify_s(str(group_dn), modlist)
except errors.DatabaseError as e:
api.log.error('Adding new members to default group failed: %s \n'
'members: %s', e, ','.join(member_dns))
logger.error('Adding new members to default group failed: %s \n'
'members: %s', e, ','.join(member_dns))
e = datetime.datetime.now()
d = e - s
mode = " (forced)" if force else ""
api.log.info('Adding %d users to group%s duration %s',
len(member_dns), mode, d)
logger.info('Adding %d users to group%s duration %s',
len(member_dns), mode, d)
# GROUP MIGRATION CALLBACKS AND VARS
@ -352,24 +363,25 @@ def _pre_migrate_group(ldap, pkey, dn, entry_attrs, failed, config, ctx, **kwarg
except ValueError as e:
# This should be impossible unless the remote server
# doesn't enforce syntax checking.
api.log.error('Malformed DN %s: %s' % (m, e))
logger.error('Malformed DN %s: %s', m, e)
continue
try:
rdnval = m[0].value
except IndexError:
api.log.error('Malformed DN %s has no RDN?' % m)
logger.error('Malformed DN %s has no RDN?', m)
continue
if m.endswith(search_bases['user']):
api.log.debug('migrating %s user %s', member_attr, m)
logger.debug('migrating %s user %s', member_attr, m)
m = DN((api.Object.user.primary_key.name, rdnval),
api.env.container_user, api.env.basedn)
elif m.endswith(search_bases['group']):
api.log.debug('migrating %s group %s', member_attr, m)
logger.debug('migrating %s group %s', member_attr, m)
m = DN((api.Object.group.primary_key.name, rdnval),
api.env.container_group, api.env.basedn)
else:
api.log.error('entry %s does not belong into any known container' % m)
logger.error('entry %s does not belong into any known '
'container', m)
continue
new_members.append(m)
@ -861,8 +873,10 @@ migration process might be incomplete\n''')
total_dur = e - migration_start
migrate_cnt += 1
if migrate_cnt > 0 and migrate_cnt % 100 == 0:
api.log.info("%d %ss migrated. %s elapsed." % (migrate_cnt, ldap_obj_name, total_dur))
api.log.debug("%d %ss migrated, duration: %s (total %s)" % (migrate_cnt, ldap_obj_name, d, total_dur))
logger.info("%d %ss migrated. %s elapsed.",
migrate_cnt, ldap_obj_name, total_dur)
logger.debug("%d %ss migrated, duration: %s (total %s)",
migrate_cnt, ldap_obj_name, d, total_dur)
if 'def_group_dn' in context:
_update_default_group(ldap, context, True)

View File

@ -75,12 +75,6 @@ fake_api_env = {'env': [
# this is due ipaserver.rpcserver.KerberosSession where api is undefined
fake_api = {'api': [fake_api_env] + NAMESPACE_ATTRS}
_LOGGING_ATTRS = ['debug', 'info', 'warning', 'error', 'exception',
'critical']
LOGGING_ATTRS = [
{'log': _LOGGING_ATTRS},
] + _LOGGING_ATTRS
# 'class': ['generated', 'properties']
ipa_class_members = {
# Python standard library & 3rd party classes
@ -102,9 +96,10 @@ ipa_class_members = {
'validate_api',
'startup_traceback',
'verbose',
'debug',
'server',
{'domain': dir(str)},
] + LOGGING_ATTRS,
],
'ipalib.errors.ACIError': [
'info',
],
@ -200,7 +195,7 @@ ipa_class_members = {
],
'ipalib.plugable.API': [
fake_api_env,
] + NAMESPACE_ATTRS + LOGGING_ATTRS,
] + NAMESPACE_ATTRS,
'ipalib.plugable.Plugin': [
'Object',
'Method',