logging: remove object-specific loggers

Remove all object-specific loggers, with the exception of `Plugin.log`,
which is now deprecated. Replace affected logger calls with module-level
logger calls.

Deprecate object-specific loggers in `ipa_log_manager.get_logger`.

Reviewed-By: Martin Basti <mbasti@redhat.com>
Author: Jan Cholasta
Date: 2017-05-23 18:35:57 +02:00
Committer: Martin Basti
Parent: 9d19654cbd
Commit: ffadcb0414
72 changed files with 1095 additions and 783 deletions
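
Every hunk below applies the same pattern: the per-object ``self.log`` attribute goes away and messages are logged through a logger created once at import time with ``logging.getLogger(__name__)``. A minimal before/after sketch (the class name echoes the first file below; the base class and the message string are illustrative, not taken from the diff):

    # before: object-specific logger, now deprecated
    #     self.log.debug("starting certificate update")

    # after: one module-level logger per module
    import logging

    logger = logging.getLogger(__name__)

    class CertUpdate(object):  # stands in for an admintool.AdminTool subclass
        def run(self):
            logger.debug("starting certificate update")

Note that many call sites also switch from eager string interpolation (``'... %s' % value``) to passing the arguments to the logging call itself, so formatting only happens when the record is actually emitted.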

View File

@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import os
import tempfile
import shutil
@@ -34,6 +35,8 @@ from ipaplatform.tasks import tasks
from ipalib import api, errors, x509
from ipalib.constants import IPA_CA_NICKNAME, RENEWAL_CA_NAME
logger = logging.getLogger(__name__)
class CertUpdate(admintool.AdminTool):
command_name = 'ipa-certupdate'
@@ -103,10 +106,9 @@ class CertUpdate(admintool.AdminTool):
# pylint: disable=import-error,ipa-forbidden-import
from ipaserver.install import cainstance
# pylint: enable=import-error,ipa-forbidden-import
cainstance.add_lightweight_ca_tracking_requests(
self.log, lwcas)
cainstance.add_lightweight_ca_tracking_requests(lwcas)
except Exception:
self.log.exception(
logger.exception(
"Failed to add lightweight CA tracking requests")
self.update_client(certs)
@@ -124,7 +126,7 @@ class CertUpdate(admintool.AdminTool):
try:
ipa_db.delete_cert(nickname)
except ipautil.CalledProcessError as e:
self.log.error("Failed to remove %s from %s: %s",
logger.error("Failed to remove %s from %s: %s",
nickname, ipa_db.secdir, e)
break
@@ -153,7 +155,7 @@ class CertUpdate(admintool.AdminTool):
if request_id is not None:
timeout = api.env.startup_timeout + 60
self.log.debug("resubmitting certmonger request '%s'", request_id)
logger.debug("resubmitting certmonger request '%s'", request_id)
certmonger.resubmit_request(
request_id, ca='dogtag-ipa-ca-renew-agent-reuse', profile='')
try:
@@ -168,7 +170,7 @@ class CertUpdate(admintool.AdminTool):
"Error resubmitting certmonger request '%s', "
"please check the request manually" % request_id)
self.log.debug("modifying certmonger request '%s'", request_id)
logger.debug("modifying certmonger request '%s'", request_id)
certmonger.modify(request_id, ca='dogtag-ipa-ca-renew-agent')
self.update_file(paths.CA_CRT, certs)
@@ -179,7 +181,7 @@ class CertUpdate(admintool.AdminTool):
try:
x509.write_certificate_list(certs, filename)
except Exception as e:
self.log.error("failed to update %s: %s", filename, e)
logger.error("failed to update %s: %s", filename, e)
def update_db(self, path, certs):
db = certdb.NSSDatabase(path)
@@ -189,5 +191,5 @@ class CertUpdate(admintool.AdminTool):
try:
db.add_cert(cert, nickname, trust_flags)
except ipautil.CalledProcessError as e:
self.log.error(
logger.error(
"failed to update %s in %s: %s", nickname, path, e)

View File

@@ -527,7 +527,7 @@ the following:
Also note that your ``execute()`` method should not contain any ``print``
statements or otherwise cause any output on ``sys.stdout``. Your command can
(and should) produce log messages by using ``self.log`` (see below).
(and should) produce log messages by using a module-level logger (see below).
To learn more about XML-RPC (XML Remote Procedure Call), see:
@@ -670,18 +670,19 @@ See the `ipalib.cli.textui` plugin for a description of its methods.
Logging from your plugin
------------------------
After `plugable.Plugin.__init__()` is called, your plugin will have a
``self.log`` attribute. Plugins should only log through this attribute.
Plugins should log through a module-level logger.
For example:
>>> import logging
>>> logger = logging.getLogger(__name__)
>>> class paint_house(Command):
...
... takes_args = 'color'
...
... def execute(self, color, **options):
... """Uses self.log.error()"""
... """Uses logger.error()"""
... if color not in ('red', 'blue', 'green'):
... self.log.error("I don't have %s paint!", color) # Log error
... logger.error("I don't have %s paint!", color) # Log error
... return
... return 'I painted the house %s.' % color
...
@@ -691,7 +692,7 @@ Some basic knowledge of the Python ``logging`` module might be helpful. See:
http://docs.python.org/library/logging.html
The important thing to remember is that your plugin should not configure
logging itself, but should instead simply use the ``self.log`` logger.
logging itself, but should instead simply use the module-level logger.
Also see the `plugable.API.bootstrap()` method for details on how the logging
is configured.

View File

@@ -21,6 +21,7 @@
Base classes for all back-end plugins.
"""
import logging
import threading
import os
@@ -28,6 +29,8 @@ from ipalib import plugable
from ipalib.errors import PublicError, InternalError, CommandError
from ipalib.request import context, Connection, destroy_context
logger = logging.getLogger(__name__)
class Backend(plugable.Plugin):
"""
@@ -66,7 +69,7 @@ class Connectible(Backend):
conn = self.create_connection(*args, **kw)
setattr(context, self.id, Connection(conn, self.disconnect))
assert self.conn is conn
self.debug('Created connection context.%s' % self.id)
logger.debug('Created connection context.%s', self.id)
def create_connection(self, *args, **kw):
raise NotImplementedError('%s.create_connection()' % self.id)
@@ -82,7 +85,7 @@ class Connectible(Backend):
)
self.destroy_connection()
delattr(context, self.id)
self.debug('Destroyed connection context.%s' % self.id)
logger.debug('Destroyed connection context.%s', self.id)
def destroy_connection(self):
raise NotImplementedError('%s.destroy_connection()' % self.id)
@@ -140,7 +143,7 @@ class Executioner(Backend):
except PublicError as e:
error = e
except Exception as e:
self.exception(
logger.exception(
'non-public: %s: %s', e.__class__.__name__, str(e)
)
error = InternalError()

View File

@@ -20,6 +20,8 @@
"""
Base classes for all front-end plugins.
"""
import logging
import six
from ipapython.version import API_VERSION
@@ -41,6 +43,8 @@ from ipalib.util import classproperty, json_serialize
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
RULE_FLAG = 'validation_rule'
def rule(obj):
@@ -459,14 +463,14 @@ class Command(HasParam):
self.add_message(
messages.VersionMissing(server_version=self.api_version))
params = self.args_options_2_params(*args, **options)
self.debug(
logger.debug(
'raw: %s(%s)', self.name, ', '.join(self._repr_iter(**params))
)
if self.api.env.in_server:
params.update(self.get_default(**params))
params = self.normalize(**params)
params = self.convert(**params)
self.debug(
logger.debug(
'%s(%s)', self.name, ', '.join(self._repr_iter(**params))
)
if self.api.env.in_server:
@@ -1463,7 +1467,7 @@ class Updater(Plugin):
raise NotImplementedError('%s.execute()' % self.name)
def __call__(self, **options):
self.debug(
logger.debug(
'raw: %s', self.name
)

View File

@@ -34,6 +34,7 @@ from __future__ import absolute_import
from decimal import Decimal
import datetime
import logging
import os
import locale
import base64
@@ -91,6 +92,8 @@ else:
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
COOKIE_NAME = 'ipa_session'
CCACHE_COOKIE_KEY = 'X-IPA-Session-Cookie'
@@ -889,8 +892,7 @@ class RPCClient(Connectible):
cookie_string = read_persistent_client_session_data(principal)
cookie_string = cookie_string.decode('utf-8')
except Exception as e:
self.log.debug('Error reading client session data: {err}'
.format(err=e))
logger.debug('Error reading client session data: %s', e)
return None
# Search for the session cookie within the cookie string
@@ -899,9 +901,9 @@ class RPCClient(Connectible):
cookie_string, COOKIE_NAME,
timestamp=datetime.datetime.utcnow())
except Exception as e:
self.log.debug(
'Error retrieving cookie from the persistent storage: {err}'
.format(err=e))
logger.debug(
'Error retrieving cookie from the persistent storage: %s',
e)
return None
return session_cookie
@@ -934,33 +936,37 @@ class RPCClient(Connectible):
session_cookie = self.get_session_cookie_from_persistent_storage(principal)
if session_cookie is None:
self.log.debug("failed to find session_cookie in persistent storage for principal '%s'",
logger.debug("failed to find session_cookie in persistent storage "
"for principal '%s'",
principal)
return original_url
else:
self.debug("found session_cookie in persistent storage for principal '%s', cookie: '%s'",
logger.debug("found session_cookie in persistent storage for "
"principal '%s', cookie: '%s'",
principal, session_cookie)
# Decide if we should send the cookie to the server
try:
session_cookie.http_return_ok(original_url)
except Cookie.Expired as e:
self.debug("deleting session data for principal '%s': %s", principal, e)
logger.debug("deleting session data for principal '%s': %s",
principal, e)
try:
delete_persistent_client_session_data(principal)
except Exception as e:
pass
return original_url
except Cookie.URLMismatch as e:
self.debug("not sending session cookie, URL mismatch: %s", e)
logger.debug("not sending session cookie, URL mismatch: %s", e)
return original_url
except Exception as e:
self.error("not sending session cookie, unknown error: %s", e)
logger.error("not sending session cookie, unknown error: %s", e)
return original_url
# O.K. session_cookie is valid to be returned, stash it away where it will
# get included in an HTTP Cookie header sent to the server.
self.log.debug("setting session_cookie into context '%s'", session_cookie.http_cookie())
logger.debug("setting session_cookie into context '%s'",
session_cookie.http_cookie())
setattr(context, 'session_cookie', session_cookie.http_cookie())
# Form the session URL by substituting the session path into the original URL
@@ -1023,7 +1029,7 @@ class RPCClient(Connectible):
transport_class = LanguageAwareTransport
proxy_kw['transport'] = transport_class(
protocol=self.protocol, service='HTTP', ccache=ccache)
self.log.info('trying %s' % url)
logger.info('trying %s', url)
setattr(context, 'request_url', url)
serverproxy = self.server_proxy_class(url, **proxy_kw)
if len(urls) == 1:
@@ -1066,7 +1072,7 @@ class RPCClient(Connectible):
if not fallback:
raise
else:
self.log.info(
logger.info(
'Connection to %s failed with %s', url, e)
# try the next url
break
@@ -1074,7 +1080,7 @@ class RPCClient(Connectible):
if not fallback:
raise
else:
self.log.info(
logger.info(
'Connection to %s failed with %s', url, e)
# try the next url
break
@@ -1113,13 +1119,13 @@ class RPCClient(Connectible):
# each time should we be getting UNAUTHORIZED error from the server
max_tries = 5
for try_num in range(0, max_tries):
self.log.info("[try %d]: Forwarding '%s' to %s server '%s'",
logger.info("[try %d]: Forwarding '%s' to %s server '%s'",
try_num+1, name, self.protocol, server)
try:
return self._call_command(command, params)
except Fault as e:
e = decode_fault(e)
self.debug('Caught fault %d from server %s: %s', e.faultCode,
logger.debug('Caught fault %d from server %s: %s', e.faultCode,
server, e.faultString)
if e.faultCode in errors_by_code:
error = errors_by_code[e.faultCode]
@@ -1143,8 +1149,8 @@ class RPCClient(Connectible):
except Exception as e:
# This shouldn't happen if we have a session
# but it isn't fatal.
self.debug("Error trying to remove persisent session "
"data: {err}".format(err=e))
logger.debug("Error trying to remove persisent "
"session data: %s", e)
# Create a new serverproxy with the non-session URI
serverproxy = self.create_connection(

View File

@@ -26,6 +26,7 @@ from __future__ import (
print_function,
)
import logging
import os
import socket
import re
@@ -64,6 +65,8 @@ from ipapython.ipa_log_manager import root_logger
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
def json_serialize(obj):
if isinstance(obj, (list, tuple)):
@@ -697,7 +700,6 @@ def _log_response(log, e):
:param e: DNSException
"""
assert isinstance(e, DNSException)
if log is not None:
response = getattr(e, 'kwargs', {}).get('response')
if response:
log.debug("DNSException: %s; server response: %s", e, response)
@@ -736,7 +738,7 @@ def _resolve_record(owner, rtype, nameserver_ip=None, edns0=False,
return res.query(owner, rtype)
def _validate_edns0_forwarder(owner, rtype, ip_addr, log=None, timeout=10):
def _validate_edns0_forwarder(owner, rtype, ip_addr, log=logger, timeout=10):
"""
Validate if forwarder supports EDNS0
@@ -760,7 +762,7 @@ def _validate_edns0_forwarder(owner, rtype, ip_addr, log=None, timeout=10):
error=e)
def validate_dnssec_global_forwarder(ip_addr, log=None, timeout=10):
def validate_dnssec_global_forwarder(ip_addr, log=logger, timeout=10):
"""Test DNS forwarder properties. against root zone.
Global forwarders should be able return signed root zone
@@ -793,17 +795,16 @@ def validate_dnssec_global_forwarder(ip_addr, log=None, timeout=10):
raise DNSSECSignatureMissingError(owner=owner, rtype=rtype, ip=ip_addr)
def validate_dnssec_zone_forwarder_step1(ip_addr, fwzone, log=None, timeout=10):
def validate_dnssec_zone_forwarder_step1(ip_addr, fwzone, timeout=10):
"""
Only forwarders in forward zones can be validated in this way
:raise UnresolvableRecordError: record cannot be resolved
:raise EDNS0UnsupportedError: EDNS0 is not supported by forwarder
"""
_validate_edns0_forwarder(fwzone, "SOA", ip_addr, log=log, timeout=timeout)
_validate_edns0_forwarder(fwzone, "SOA", ip_addr, timeout=timeout)
def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, log=None,
timeout=10):
def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, timeout=10):
"""
This step must be executed after forwarders are added into LDAP, and only
when we are sure the forwarders work.
@@ -813,6 +814,7 @@ def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, log=None,
:raise UnresolvableRecordError: record cannot be resolved
:raise DNSSECValidationError: response from forwarder is not DNSSEC valid
"""
log = logger
rtype = "SOA"
try:
ans_cd = _resolve_record(fwzone, rtype, nameserver_ip=ipa_ip_addr,

View File

@@ -31,6 +31,9 @@ from optparse import OptionGroup # pylint: disable=deprecated-module
from ipapython import version
from ipapython import config
from ipapython import ipa_log_manager
from ipapython.ipa_log_manager import standard_logging_setup
logger = logging.getLogger(__name__)
class ScriptError(Exception):
@@ -88,7 +91,6 @@ class AdminTool(object):
usage = None
description = None
log = None
_option_parsers = dict()
@classmethod
@@ -226,7 +228,7 @@ class AdminTool(object):
- a plain print for things that should not be logged (for example,
interactive prompting)
To log, use `self.log.info()`, `self.log.warning()`, etc.
To log, use a module-level logger.
Logging to file is only set up after option validation and prompting;
before that, all output will go to the console only.
@@ -258,14 +260,13 @@ class AdminTool(object):
verbose = False
else:
verbose = True
ipa_log_manager.standard_logging_setup(
standard_logging_setup(
log_file_name, console_format=console_format,
filemode=log_file_mode, debug=debug, verbose=verbose)
self.log = ipa_log_manager.log_mgr.get_logger(self)
if log_file_name:
self.log.debug('Logging to %s' % log_file_name)
logger.debug('Logging to %s', log_file_name)
elif not no_file:
self.log.debug('Not logging to a file')
logger.debug('Not logging to a file')
def handle_error(self, exception):
@@ -290,20 +291,20 @@ class AdminTool(object):
assumed to have run successfully, and the return value is used as the
SystemExit code.
"""
self.log.debug('%s was invoked with arguments %s and options: %s',
logger.debug('%s was invoked with arguments %s and options: %s',
self.command_name, self.args, self.safe_options)
self.log.debug('IPA version %s' % version.VENDOR_VERSION)
logger.debug('IPA version %s', version.VENDOR_VERSION)
def log_failure(self, error_message, return_value, exception, backtrace):
self.log.debug(''.join(traceback.format_tb(backtrace)))
self.log.debug('The %s command failed, exception: %s: %s',
logger.debug('%s', ''.join(traceback.format_tb(backtrace)))
logger.debug('The %s command failed, exception: %s: %s',
self.command_name, type(exception).__name__, exception)
if error_message:
self.log.error(error_message)
logger.error('%s', error_message)
message = "The %s command failed." % self.command_name
if self.log_file_name:
message += " See %s for more information" % self.log_file_name
self.log.error(message)
logger.error('%s', message)
def log_success(self):
self.log.info('The %s command was successful', self.command_name)
logger.info('The %s command was successful', self.command_name)
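
The admintool hunk above drops ``ipa_log_manager.log_mgr.get_logger(self)`` and keeps ``standard_logging_setup()`` for handler configuration, so the module-level logger only has to propagate to whatever handlers that call installs. A rough, self-contained sketch of that wiring (the log file name is made up; only keyword arguments visible in the hunk are used):

    import logging
    from ipapython.ipa_log_manager import standard_logging_setup

    logger = logging.getLogger(__name__)

    # configure console/file handlers once, early in the tool's run
    standard_logging_setup('example-tool.log', debug=True, verbose=False)
    logger.debug('Logging to %s', 'example-tool.log')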

View File

@@ -27,8 +27,6 @@ import six
from six.moves.urllib.parse import urlparse
# pylint: enable=import-error
from ipapython.ipa_log_manager import log_mgr
'''
Core Python has two cookie libraries, Cookie.py targeted to server
side and cookielib.py targeted to client side. So why this module and
@@ -354,9 +352,6 @@ class Cookie(object):
def __init__(self, key, value, domain=None, path=None, max_age=None, expires=None,
secure=None, httponly=None, timestamp=None):
log_mgr.get_logger(self, True)
self.key = key
self.value = value
self.domain = domain

View File

@@ -9,12 +9,14 @@ Command line support.
import collections
import enum
import functools
import logging
import optparse # pylint: disable=deprecated-module
import signal
import six
from ipapython import admintool, ipa_log_manager
from ipapython import admintool
from ipapython.ipa_log_manager import standard_logging_setup
from ipapython.ipautil import CheckedIPAddress, private_ccache
from . import core, common
@@ -26,6 +28,8 @@ if six.PY3:
NoneType = type(None)
logger = logging.getLogger(__name__)
def _get_usage(configurable_class):
usage = '%prog [options]'
@@ -285,16 +289,15 @@ class ConfigureTool(admintool.AdminTool):
log_file_name = self.options.log_file
else:
log_file_name = self.log_file_name
ipa_log_manager.standard_logging_setup(
standard_logging_setup(
log_file_name,
verbose=self.verbose,
debug=self.options.verbose,
console_format=self.console_format)
self.log = ipa_log_manager.log_mgr.get_logger(self)
if log_file_name:
self.log.debug('Logging to %s' % log_file_name)
logger.debug('Logging to %s', log_file_name)
elif not no_file:
self.log.debug('Not logging to a file')
logger.debug('Not logging to a file')
def run(self):
kwargs = {}

View File

@@ -20,6 +20,7 @@ import logging
import os
import re
import time
import warnings
import sys
import six
@@ -56,14 +57,57 @@ LOGGING_FORMAT_STANDARD_CONSOLE = '%(name)-12s: %(levelname)-8s %(message)s'
LOGGING_FORMAT_STANDARD_FILE = '%(asctime)s %(levelname)s %(message)s'
class _DeprecatedLogger(object):
def __init__(self, logger, name):
self._logger = logger
self._name = name
def _warn(self):
warnings.warn(
"{} is deprecated, use a module-level logger".format(self._name),
DeprecationWarning)
def debug(self, *args, **kwargs):
self._warn()
self._logger.debug(*args, **kwargs)
def info(self, *args, **kwargs):
self._warn()
self._logger.info(*args, **kwargs)
def warning(self, *args, **kwargs):
self._warn()
self._logger.warning(*args, **kwargs)
def error(self, *args, **kwargs):
self._warn()
self._logger.error(*args, **kwargs)
def critical(self, *args, **kwargs):
self._warn()
self._logger.critical(*args, **kwargs)
def exception(self, *args, **kwargs):
self._warn()
self._logger.exception(*args, **kwargs)
def get_logger(who, bind_logger_names=False):
if isinstance(who, six.string_types):
logger_name = who
else:
logger_name = '%s.%s' % (who.__module__, who.__class__.__name__)
caller_globals = sys._getframe(1).f_globals
logger_name = caller_globals.get('__name__', '__main__')
if logger_name == '__main__':
logger_name = caller_globals.get('__file__', logger_name)
logger_name = os.path.basename(logger_name)
logger = logging.getLogger(logger_name)
if not isinstance(who, six.string_types):
obj_name = '%s.%s' % (who.__module__, who.__class__.__name__)
logger = _DeprecatedLogger(logger, obj_name)
if bind_logger_names:
method = 'log'
if hasattr(who, method):
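
Assuming the hunk above is the final shape of ``get_logger()``, the object-specific form keeps working but every call is routed through ``_DeprecatedLogger`` and emits a ``DeprecationWarning`` first, while a string argument is still expected to yield a plain ``logging.Logger``. A hypothetical call-site sketch:

    import warnings
    from ipapython import ipa_log_manager

    class SomePlugin(object):  # hypothetical class that used to carry self.log
        pass

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        log = ipa_log_manager.get_logger(SomePlugin())  # object-specific form
        log.debug('flushing cache')                     # warns, then logs
        assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    # string form: assumed to return an unwrapped logger, per the isinstance
    # check in the hunk above
    plain = ipa_log_manager.get_logger(__name__)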

View File

@@ -20,6 +20,7 @@
#
import binascii
import logging
import time
import datetime
from decimal import Decimal
@@ -44,7 +45,6 @@ from ipalib import errors, _
from ipalib.constants import LDAP_GENERALIZED_TIME_FORMAT
# pylint: enable=ipa-forbidden-import
from ipapython.ipautil import format_netloc, CIDict
from ipapython.ipa_log_manager import log_mgr
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
from ipapython.kerberos import Principal
@@ -52,6 +52,8 @@ from ipapython.kerberos import Principal
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
# Global variable to define SASL auth
SASL_GSSAPI = ldap.sasl.sasl({}, 'GSSAPI')
SASL_GSS_SPNEGO = ldap.sasl.sasl({}, 'GSS-SPNEGO')
@@ -89,7 +91,6 @@ class SchemaCache(object):
'''
def __init__(self):
self.log = log_mgr.get_logger(self)
self.servers = {}
def get_schema(self, url, conn, force_update=False):
@@ -113,7 +114,7 @@ class SchemaCache(object):
return server_schema.schema
def flush(self, url):
self.log.debug('flushing %s from SchemaCache', url)
logger.debug('flushing %s from SchemaCache', url)
try:
del self.servers[url]
except KeyError:
@@ -134,7 +135,7 @@ class SchemaCache(object):
"""
assert conn is not None
self.log.debug(
logger.debug(
'retrieving schema for SchemaCache url=%s conn=%s', url, conn)
try:
@@ -144,7 +145,7 @@ class SchemaCache(object):
except ldap.NO_SUCH_OBJECT:
# try different location for schema
# openldap has schema located in cn=subschema
self.log.debug('cn=schema not found, fallback to cn=subschema')
logger.debug('cn=schema not found, fallback to cn=subschema')
schema_entry = conn.search_s('cn=subschema', ldap.SCOPE_BASE,
attrlist=['attributetypes', 'objectclasses'])[0]
except ldap.SERVER_DOWN:
@@ -721,7 +722,6 @@ class LDAPClient(object):
self._cacert = cacert
self._sasl_nocanon = sasl_nocanon
self.log = log_mgr.get_logger(self)
self._has_schema = False
self._schema = None
@@ -896,7 +896,7 @@ class LDAPClient(object):
return target_type(val)
except Exception:
msg = 'unable to convert the attribute %r value %r to type %s' % (attr, val, target_type)
self.log.error(msg)
logger.error('%s', msg)
raise ValueError(msg)
elif isinstance(val, list):
return [self.decode(m, attr) for m in val]
@@ -933,7 +933,7 @@ class LDAPClient(object):
if original_dn is None:
log_msg = 'Referral entry ignored: {ref}'\
.format(ref=str(original_attrs))
self.log.debug(log_msg)
logger.debug('%s', log_msg)
continue
@@ -946,7 +946,7 @@ class LDAPClient(object):
ipa_result.append(ipa_entry)
if _debug_log_ldap:
self.log.debug('ldap.result: %s', ipa_result)
logger.debug('ldap.result: %s', ipa_result)
return ipa_result
@contextlib.contextmanager
@@ -1019,8 +1019,8 @@ class LDAPClient(object):
if 'NOT_ALLOWED_TO_DELEGATE' in info:
raise errors.ACIError(
info="KDC returned NOT_ALLOWED_TO_DELEGATE")
self.log.debug(
'Unhandled LDAPError: %s: %s' % (type(e).__name__, str(e)))
logger.debug(
'Unhandled LDAPError: %s: %s', type(e).__name__, str(e))
raise errors.DatabaseError(desc=desc, info=info)
@staticmethod
@@ -1317,11 +1317,10 @@ class LDAPClient(object):
try:
self.handle_truncated_result(truncated)
except errors.LimitsExceeded as e:
self.log.error(
"{} while getting entries (base DN: {}, filter: {})".format(
logger.error(
"%s while getting entries (base DN: %s, filter: %s)",
e, base_dn, filter
)
)
raise
return entries
@@ -1429,7 +1428,7 @@ class LDAPClient(object):
serverctrls=sctrls, timeout=time_limit,
sizelimit=size_limit)
except ldap.LDAPError as e:
self.log.warning(
logger.warning(
"Error cancelling paged search: %s", e)
cookie = ''

View File

@@ -507,7 +507,7 @@ class IpaAdvise(admintool.AdminTool):
advise_api.finalize()
if not self.options.verbose:
# Do not print connection information by default
logger_name = r'ipalib\.plugins\.rpcclient'
logger_name = r'ipalib\.rpc'
root_logger.addFilter(Filter(logger_name, logging.WARNING))
# With no argument, print the list out and exit

View File

@@ -3,6 +3,8 @@
#
from datetime import datetime
import logging
import dns.name
import errno
import os
@@ -11,11 +13,13 @@ import stat
import ipalib.constants
from ipapython.dn import DN
from ipapython import ipa_log_manager, ipautil
from ipapython import ipautil
from ipaplatform.paths import paths
from ipaserver.dnssec.temp import TemporaryDirectory
logger = logging.getLogger(__name__)
time_bindfmt = '%Y%m%d%H%M%S'
# this daemon should run under ods:named user:group
@@ -31,14 +35,13 @@ class BINDMgr(object):
"""
def __init__(self, api):
self.api = api
self.log = ipa_log_manager.log_mgr.get_logger(self)
self.ldap_keys = {}
self.modified_zones = set()
def notify_zone(self, zone):
cmd = ['rndc', 'sign', zone.to_text()]
result = ipautil.run(cmd, capture_output=True)
self.log.info('%s', result.output_log)
logger.info('%s', result.output_log)
def dn2zone_name(self, dn):
"""cn=KSK-20140813162153Z-cede9e182fc4af76c4bddbc19123a565,cn=keys,idnsname=test,cn=dns,dc=ipa,dc=example"""
@@ -83,21 +86,24 @@ class BINDMgr(object):
self.modified_zones.add(zone)
zone_keys = self.ldap_keys.setdefault(zone, {})
if op == 'add':
self.log.info('Key metadata %s added to zone %s' % (attrs['dn'], zone))
logger.info('Key metadata %s added to zone %s',
attrs['dn'], zone)
zone_keys[uuid] = attrs
elif op == 'del':
self.log.info('Key metadata %s deleted from zone %s' % (attrs['dn'], zone))
logger.info('Key metadata %s deleted from zone %s',
attrs['dn'], zone)
zone_keys.pop(uuid)
elif op == 'mod':
self.log.info('Key metadata %s updated in zone %s' % (attrs['dn'], zone))
logger.info('Key metadata %s updated in zone %s',
attrs['dn'], zone)
zone_keys[uuid] = attrs
def install_key(self, zone, uuid, attrs, workdir):
"""Run dnssec-keyfromlabel on given LDAP object.
:returns: base file name of output files, e.g. Kaaa.test.+008+19719"""
self.log.info('attrs: %s', attrs)
logger.info('attrs: %s', attrs)
assert attrs.get('idnsseckeyzone', ['FALSE'])[0] == 'TRUE', \
'object %s is not a DNS zone key' % attrs['dn']
@@ -152,7 +158,7 @@ class BINDMgr(object):
return escaped[:-1]
def sync_zone(self, zone):
self.log.info('Synchronizing zone %s' % zone)
logger.info('Synchronizing zone %s', zone)
zone_path = os.path.join(paths.BIND_LDAP_DNS_ZONE_WORKDIR,
self.get_zone_dir_name(zone))
try:
@@ -166,11 +172,11 @@ class BINDMgr(object):
for prefix, dirs, files in os.walk(paths.DNSSEC_TOKENS_DIR, topdown=True):
for name in dirs:
fpath = os.path.join(prefix, name)
self.log.debug('Fixing directory permissions: %s', fpath)
logger.debug('Fixing directory permissions: %s', fpath)
os.chmod(fpath, DIR_PERM | stat.S_ISGID)
for name in files:
fpath = os.path.join(prefix, name)
self.log.debug('Fixing file permissions: %s', fpath)
logger.debug('Fixing file permissions: %s', fpath)
os.chmod(fpath, FILE_PERM)
# TODO: move out
@@ -199,8 +205,8 @@ class BINDMgr(object):
have old metadata objects and DNSSEC disabled. Such zones must be
ignored to prevent errors while calling dnssec-keyfromlabel or rndc.
"""
self.log.debug('Key metadata in LDAP: %s' % self.ldap_keys)
self.log.debug('Zones modified but skipped during bindmgr.sync: %s',
logger.debug('Key metadata in LDAP: %s', self.ldap_keys)
logger.debug('Zones modified but skipped during bindmgr.sync: %s',
self.modified_zones - dnssec_zones)
for zone in self.modified_zones.intersection(dnssec_zones):
self.sync_zone(zone)

View File

@@ -2,6 +2,8 @@
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import logging
import ldap.dn
import os
@@ -14,6 +16,8 @@ from ipaserver.dnssec.syncrepl import SyncReplConsumer
from ipaserver.dnssec.odsmgr import ODSMgr
from ipaserver.dnssec.bindmgr import BINDMgr
logger = logging.getLogger(__name__)
SIGNING_ATTR = 'idnsSecInlineSigning'
OBJCLASS_ATTR = 'objectClass'
@@ -109,7 +113,8 @@ class KeySyncer(SyncReplConsumer):
self.hsm_master_sync()
def syncrepl_refreshdone(self):
self.log.info('Initial LDAP dump is done, synchronizing with ODS and BIND')
logger.info('Initial LDAP dump is done, synchronizing with ODS and '
'BIND')
self.init_done = True
self.ods_sync()
self.hsm_replica_sync()

View File

@@ -6,6 +6,7 @@ from __future__ import print_function
from binascii import hexlify
import collections
import logging
from pprint import pprint
import ipalib
@@ -22,6 +23,9 @@ from ipaserver.dnssec.abshsm import (
from ipaserver import p11helper as _ipap11helper
import uuid
logger = logging.getLogger(__name__)
def uri_escape(val):
"""convert val to %-notation suitable for ID component in URI"""
assert len(val) > 0, "zero-length URI component detected"
@@ -119,7 +123,7 @@ class Key(collections.MutableMapping):
self._delentry = None # indicates that object was deleted
self.ldap = ldap
self.ldapkeydb = ldapkeydb
self.log = ldap.log.getChild(__name__)
self.log = logger.getChild(__name__)
def __assert_not_deleted(self):
assert self.entry and not self._delentry, (

View File

@@ -3,6 +3,8 @@
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import logging
import dns.name
try:
from xml.etree import cElementTree as etree
@@ -11,6 +13,8 @@ except ImportError:
from ipapython import ipa_log_manager, ipautil
logger = logging.getLogger(__name__)
# hack: zone object UUID is stored as path to imaginary zone file
ENTRYUUID_PREFIX = "/var/lib/ipa/dns/zone/entryUUID/"
ENTRYUUID_PREFIX_LEN = len(ENTRYUUID_PREFIX)
@@ -21,7 +25,6 @@ class ZoneListReader(object):
self.names = set() # dns.name
self.uuids = set() # UUID strings
self.mapping = dict() # {UUID: dns.name}
self.log = ipa_log_manager.log_mgr.get_logger(self)
def _add_zone(self, name, zid):
"""Add zone & UUID to internal structures.
@@ -125,7 +128,6 @@ class ODSMgr(object):
has to be solved separately.
"""
def __init__(self):
self.log = ipa_log_manager.log_mgr.get_logger(self)
self.zl_ldap = LDAPZoneListReader()
def ksmutil(self, params):
@@ -146,7 +148,7 @@ class ODSMgr(object):
zone_path = '%s%s' % (ENTRYUUID_PREFIX, uuid)
cmd = ['zone', 'add', '--zone', str(name), '--input', zone_path]
output = self.ksmutil(cmd)
self.log.info(output)
logger.info('%s', output)
self.notify_enforcer()
def del_ods_zone(self, name):
@@ -157,19 +159,19 @@ class ODSMgr(object):
name = dns.name.root
cmd = ['zone', 'delete', '--zone', str(name)]
output = self.ksmutil(cmd)
self.log.info(output)
logger.info('%s', output)
self.notify_enforcer()
self.cleanup_signer(name)
def notify_enforcer(self):
cmd = ['notify']
output = self.ksmutil(cmd)
self.log.info(output)
logger.info('%s', output)
def cleanup_signer(self, zone_name):
cmd = ['ods-signer', 'ldap-cleanup', str(zone_name)]
output = ipautil.run(cmd, capture_output=True)
self.log.info(output)
logger.info('%s', output)
def ldap_event(self, op, uuid, attrs):
"""Record single LDAP event - zone addition or deletion.
@@ -178,16 +180,16 @@ class ODSMgr(object):
self.sync() have to be called to synchronize change to ODS."""
assert op == 'add' or op == 'del'
self.zl_ldap.process_ipa_zone(op, uuid, attrs)
self.log.debug("LDAP zones: %s", self.zl_ldap.mapping)
logger.debug("LDAP zones: %s", self.zl_ldap.mapping)
def sync(self):
"""Synchronize list of zones in LDAP with ODS."""
zl_ods = self.get_ods_zonelist()
self.log.debug("ODS zones: %s", zl_ods.mapping)
logger.debug("ODS zones: %s", zl_ods.mapping)
removed = self.diff_zl(zl_ods, self.zl_ldap)
self.log.info("Zones removed from LDAP: %s", removed)
logger.info("Zones removed from LDAP: %s", removed)
added = self.diff_zl(self.zl_ldap, zl_ods)
self.log.info("Zones added to LDAP: %s", added)
logger.info("Zones added to LDAP: %s", added)
for (uuid, name) in removed:
self.del_ods_zone(name)
for (uuid, name) in added:

View File

@@ -7,6 +7,8 @@ This script implements a syncrepl consumer which syncs data from server
to a local dict.
"""
import logging
# Import the python-ldap modules
import ldap
# Import specific classes from python-ldap
@@ -14,7 +16,7 @@ from ldap.cidict import cidict
from ldap.ldapobject import ReconnectLDAPObject
from ldap.syncrepl import SyncreplConsumer
from ipapython import ipa_log_manager
logger = logging.getLogger(__name__)
class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
@@ -23,7 +25,6 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
"""
def __init__(self, *args, **kwargs):
self.log = ipa_log_manager.log_mgr.get_logger(self)
# Initialise the LDAP Connection first
ldap.ldapobject.ReconnectLDAPObject.__init__(self, *args, **kwargs)
# Now prepare the data store
@@ -39,13 +40,13 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
def syncrepl_get_cookie(self):
if 'cookie' in self.__data:
cookie = self.__data['cookie']
self.log.debug('Current cookie is: %s', cookie)
logger.debug('Current cookie is: %s', cookie)
return cookie
else:
self.log.debug('Current cookie is: None (not received yet)')
logger.debug('Current cookie is: None (not received yet)')
def syncrepl_set_cookie(self, cookie):
self.log.debug('New cookie is: %s', cookie)
logger.debug('New cookie is: %s', cookie)
self.__data['cookie'] = cookie
def syncrepl_entry(self, dn, attributes, uuid):
@@ -63,7 +64,7 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
attributes['dn'] = dn
self.__data['uuids'][uuid] = attributes
# Debugging
self.log.debug('Detected %s of entry: %s %s', change_type, dn, uuid)
logger.debug('Detected %s of entry: %s %s', change_type, dn, uuid)
if change_type == 'modify':
self.application_sync(uuid, dn, attributes, previous_attributes)
else:
@@ -76,7 +77,7 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
for uuid in uuids:
attributes = self.__data['uuids'][uuid]
dn = attributes['dn']
self.log.debug('Detected deletion of entry: %s %s', dn, uuid)
logger.debug('Detected deletion of entry: %s %s', dn, uuid)
self.application_del(uuid, dn, attributes)
del self.__data['uuids'][uuid]
@@ -99,17 +100,17 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
self.__presentUUIDs[uuid] = True
def application_add(self, uuid, dn, attributes):
self.log.info('Performing application add for: %s %s', dn, uuid)
self.log.debug('New attributes: %s', attributes)
logger.info('Performing application add for: %s %s', dn, uuid)
logger.debug('New attributes: %s', attributes)
return True
def application_sync(self, uuid, dn, attributes, previous_attributes):
self.log.info('Performing application sync for: %s %s', dn, uuid)
self.log.debug('Old attributes: %s', previous_attributes)
self.log.debug('New attributes: %s', attributes)
logger.info('Performing application sync for: %s %s', dn, uuid)
logger.debug('Old attributes: %s', previous_attributes)
logger.debug('New attributes: %s', attributes)
return True
def application_del(self, uuid, dn, previous_attributes):
self.log.info('Performing application delete for: %s %s', dn, uuid)
self.log.debug('Old attributes: %s', previous_attributes)
logger.info('Performing application delete for: %s %s', dn, uuid)
logger.debug('Old attributes: %s', previous_attributes)
return True

View File

@@ -22,6 +22,8 @@
from __future__ import print_function
import base64
import logging
import dbus
import ldap
import os
@@ -54,8 +56,7 @@ from ipapython import ipautil
from ipapython import ipaldap
from ipapython.certdb import get_ca_nickname
from ipapython.dn import DN
from ipapython.ipa_log_manager import log_mgr,\
standard_logging_setup, root_logger
from ipapython.ipa_log_manager import standard_logging_setup, root_logger
from ipaserver.secrets.kem import IPAKEMKeys
from ipaserver.install import certs
@@ -68,6 +69,8 @@ from ipaserver.install import sysupgrade
from ipaserver.install.dogtaginstance import DogtagInstance
from ipaserver.plugins import ldap2
logger = logging.getLogger(__name__)
# We need to reset the template because the CA uses the regular boot
# information
INF_TEMPLATE = """
@@ -306,7 +309,6 @@ class CAInstance(DogtagInstance):
self.canickname = None
self.ra_cert = None
self.requestId = None
self.log = log_mgr.get_logger(self)
self.no_db_setup = False
self.keytab = os.path.join(
paths.PKI_TOMCAT, self.service_prefix + '.keytab')
@@ -625,7 +627,7 @@ class CAInstance(DogtagInstance):
shutil.move(paths.CA_BACKUP_KEYS_P12,
paths.CACERT_P12)
self.log.debug("completed creating ca instance")
logger.debug("completed creating ca instance")
def backup_config(self):
try:
@@ -982,21 +984,21 @@ class CAInstance(DogtagInstance):
cmonger.stop()
# remove CRL files
self.log.info("Remove old CRL files")
logger.info("Remove old CRL files")
try:
for f in get_crl_files():
self.log.debug("Remove %s", f)
logger.debug("Remove %s", f)
installutils.remove_file(f)
except OSError as e:
self.log.warning("Error while removing old CRL files: %s", e)
logger.warning("Error while removing old CRL files: %s", e)
# remove CRL directory
self.log.info("Remove CRL directory")
logger.info("Remove CRL directory")
if os.path.exists(paths.PKI_CA_PUBLISH_DIR):
try:
shutil.rmtree(paths.PKI_CA_PUBLISH_DIR)
except OSError as e:
self.log.warning("Error while removing CRL publish "
logger.warning("Error while removing CRL publish "
"directory: %s", e)
def unconfigure_certmonger_renewal_guard(self):
@@ -1026,7 +1028,7 @@ class CAInstance(DogtagInstance):
post_command='renew_ra_cert',
storage='FILE')
except RuntimeError as e:
self.log.error(
logger.error(
"certmonger failed to start tracking certificate: %s", e)
def stop_tracking_certificates(self):
@@ -1061,7 +1063,7 @@ class CAInstance(DogtagInstance):
'policyset.caLogSigningSet.2.default.params.range',
separator='='
)
self.log.debug(
logger.debug(
'caSignedLogCert.cfg profile validity range is %s', cert_range)
if cert_range == "180":
installutils.set_directive(
@@ -1078,7 +1080,7 @@ class CAInstance(DogtagInstance):
quotes=False,
separator='='
)
self.log.debug(
logger.debug(
'updated caSignedLogCert.cfg profile validity range to 720')
return True
return False
@@ -1264,7 +1266,7 @@ class CAInstance(DogtagInstance):
filter='(objectclass=ipaca)',
attrs_list=['cn', 'ipacaid'],
)
add_lightweight_ca_tracking_requests(self.log, lwcas)
add_lightweight_ca_tracking_requests(lwcas)
except errors.NotFound:
# shouldn't happen, but don't fail if it does
root_logger.warning(
@@ -1791,7 +1793,7 @@ def ensure_default_caacl():
certprofile=(u'caIPAserviceCert',))
def add_lightweight_ca_tracking_requests(logger, lwcas):
def add_lightweight_ca_tracking_requests(lwcas):
"""Add tracking requests for the given lightweight CAs.
The entries must have the 'cn' and 'ipacaid' attributes.

View File

@@ -18,6 +18,8 @@
#
import base64
import logging
import ldap
import os
import shutil
@@ -39,7 +41,8 @@ from ipaserver.install import service
from ipaserver.install import installutils
from ipaserver.install import replication
from ipaserver.install.installutils import stopped_service
from ipapython.ipa_log_manager import log_mgr
logger = logging.getLogger(__name__)
def get_security_domain():
@@ -115,8 +118,6 @@ class DogtagInstance(service.Service):
self.subject_base = None
self.nss_db = nss_db
self.log = log_mgr.get_logger(self)
def is_installed(self):
"""
Determine if subsystem instance has been installed.
@@ -138,7 +139,7 @@ class DogtagInstance(service.Service):
"-f", cfg_file]
with open(cfg_file) as f:
self.log.debug(
logger.debug(
'Contents of pkispawn configuration file (%s):\n%s',
cfg_file, ipautil.nolog_replace(f.read(), nolog_list))
@@ -165,8 +166,8 @@ class DogtagInstance(service.Service):
try:
self.stop('pki-tomcat')
except Exception:
self.log.debug(traceback.format_exc())
self.log.critical(
logger.debug("%s", traceback.format_exc())
logger.critical(
"Failed to stop the Dogtag instance."
"See the installation log for details.")
@@ -223,7 +224,7 @@ class DogtagInstance(service.Service):
"-i", 'pki-tomcat',
"-s", self.subsystem])
except ipautil.CalledProcessError as e:
self.log.critical("failed to uninstall %s instance %s",
logger.critical("failed to uninstall %s instance %s",
self.subsystem, e)
def http_proxy(self):
@@ -267,7 +268,7 @@ class DogtagInstance(service.Service):
try:
return certmonger.get_pin('internal')
except IOError as e:
self.log.debug(
logger.debug(
'Unable to determine PIN for the Dogtag instance: %s', e)
raise RuntimeError(e)
@@ -286,7 +287,7 @@ class DogtagInstance(service.Service):
post_command='renew_ca_cert "%s"' % nickname,
)
except RuntimeError as e:
self.log.error(
logger.error(
"certmonger failed to start tracking certificate: %s", e)
def track_servercert(self):
@@ -305,8 +306,8 @@ class DogtagInstance(service.Service):
pre_command='stop_pkicad',
post_command='renew_ca_cert "%s"' % self.server_cert_name)
except RuntimeError as e:
self.log.error(
"certmonger failed to start tracking certificate: %s" % e)
logger.error(
"certmonger failed to start tracking certificate: %s", e)
def stop_tracking_certificates(self, stop_certmonger=True):
"""Stop tracking our certificates. Called on uninstall.
@@ -328,7 +329,7 @@ class DogtagInstance(service.Service):
certmonger.stop_tracking(
self.nss_db, nickname=nickname)
except RuntimeError as e:
self.log.error(
logger.error(
"certmonger failed to stop tracking certificate: %s", e)
if stop_certmonger:
@@ -358,7 +359,7 @@ class DogtagInstance(service.Service):
Get the certificate for the admin user by checking the ldap entry
for the user. There should be only one certificate per user.
"""
self.log.debug('Trying to find the certificate for the admin user')
logger.debug('Trying to find the certificate for the admin user')
conn = None
try:
@@ -377,11 +378,11 @@ class DogtagInstance(service.Service):
return base64.b64encode(admin_cert)
def handle_setup_error(self, e):
self.log.critical("Failed to configure %s instance: %s"
% (self.subsystem, e))
self.log.critical("See the installation logs and the following "
logger.critical("Failed to configure %s instance: %s",
self.subsystem, e)
logger.critical("See the installation logs and the following "
"files/directories for more information:")
self.log.critical(" %s" % paths.TOMCAT_TOPLEVEL_DIR)
logger.critical(" %s", paths.TOMCAT_TOPLEVEL_DIR)
raise RuntimeError("%s configuration failed." % self.subsystem)

View File

@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import os
import shutil
import tempfile
@@ -48,6 +49,8 @@ from ipaplatform.tasks import tasks
ISO8601_DATETIME_FMT = '%Y-%m-%dT%H:%M:%S'
logger = logging.getLogger(__name__)
"""
A test gpg can be generated like this:
@@ -274,7 +277,7 @@ class Backup(admintool.AdminTool):
api.bootstrap(in_server=True, context='backup', confdir=paths.ETC_IPA)
api.finalize()
self.log.info("Preparing backup on %s", api.env.host)
logger.info("Preparing backup on %s", api.env.host)
pent = pwd.getpwnam(constants.DS_USER)
@@ -302,10 +305,10 @@ class Backup(admintool.AdminTool):
self.create_header(options.data_only)
if options.data_only:
if not options.online:
self.log.info('Stopping Directory Server')
logger.info('Stopping Directory Server')
dirsrv.stop(capture_output=False)
else:
self.log.info('Stopping IPA services')
logger.info('Stopping IPA services')
run(['ipactl', 'stop'])
instance = installutils.realm_to_serverid(api.env.realm)
@@ -325,17 +328,17 @@ class Backup(admintool.AdminTool):
if options.data_only:
if not options.online:
self.log.info('Starting Directory Server')
logger.info('Starting Directory Server')
dirsrv.start(capture_output=False)
else:
self.log.info('Starting IPA service')
logger.info('Starting IPA service')
run(['ipactl', 'start'])
finally:
try:
os.chdir(cwd)
except Exception as e:
self.log.error('Cannot change directory to %s: %s' % (cwd, e))
logger.error('Cannot change directory to %s: %s', cwd, e)
shutil.rmtree(self.top_dir)
@@ -376,8 +379,8 @@ class Backup(admintool.AdminTool):
try:
self._conn.external_bind()
except Exception as e:
self.log.error("Unable to bind to LDAP server %s: %s" %
(self._conn.host, e))
logger.error("Unable to bind to LDAP server %s: %s",
self._conn.host, e)
return self._conn
@@ -391,7 +394,7 @@ class Backup(admintool.AdminTool):
For SELinux reasons this writes out to the 389-ds backup location
and we move it.
'''
self.log.info('Backing up %s in %s to LDIF' % (backend, instance))
logger.info('Backing up %s in %s to LDIF', backend, instance)
cn = time.strftime('export_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'export'), ('cn', 'tasks'), ('cn', 'config'))
@@ -421,7 +424,7 @@ class Backup(admintool.AdminTool):
raise admintool.ScriptError('Unable to add LDIF task: %s'
% e)
self.log.info("Waiting for LDIF to finish")
logger.info("Waiting for LDIF to finish")
wait_for_task(conn, dn)
else:
args = [paths.DB2LDIF,
@@ -431,7 +434,7 @@ class Backup(admintool.AdminTool):
'-a', ldiffile]
result = run(args, raiseonerr=False)
if result.returncode != 0:
self.log.critical('db2ldif failed: %s', result.error_log)
logger.critical('db2ldif failed: %s', result.error_log)
# Move the LDIF backup to our location
shutil.move(ldiffile, os.path.join(self.dir, ldifname))
@@ -443,7 +446,7 @@ class Backup(admintool.AdminTool):
If executed online create a task and wait for it to complete.
'''
self.log.info('Backing up %s' % instance)
logger.info('Backing up %s', instance)
cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config'))
@@ -468,13 +471,13 @@ class Backup(admintool.AdminTool):
raise admintool.ScriptError('Unable to add backup task: %s'
% e)
self.log.info("Waiting for BAK to finish")
logger.info("Waiting for BAK to finish")
wait_for_task(conn, dn)
else:
args = [paths.DB2BAK, bakdir, '-Z', instance]
result = run(args, raiseonerr=False)
if result.returncode != 0:
self.log.critical('db2bak failed: %s', result.error_log)
logger.critical('db2bak failed: %s', result.error_log)
shutil.move(bakdir, self.dir)
@@ -486,7 +489,7 @@ class Backup(admintool.AdminTool):
tarfile = os.path.join(self.dir, 'files.tar')
self.log.info("Backing up files")
logger.info("Backing up files")
args = ['tar',
'--exclude=/var/lib/ipa/backup',
'--xattrs',
@@ -564,11 +567,11 @@ class Backup(admintool.AdminTool):
conn = self.get_connection()
services = conn.get_entries(dn, conn.SCOPE_ONELEVEL)
except errors.NetworkError:
self.log.critical(
logger.critical(
"Unable to obtain list of master services, continuing anyway")
except Exception as e:
self.log.error("Failed to read services from '%s': %s" %
(conn.host, e))
logger.error("Failed to read services from '%s': %s",
conn.host, e)
else:
services_cns = [s.single_value['cn'] for s in services]
@@ -615,9 +618,9 @@ class Backup(admintool.AdminTool):
(result.returncode, result.error_log))
if encrypt:
self.log.info('Encrypting %s' % filename)
logger.info('Encrypting %s', filename)
filename = encrypt_file(filename, keyring)
shutil.move(self.header, backup_dir)
self.log.info('Backed up to %s', backup_dir)
logger.info('Backed up to %s', backup_dir)

View File

@@ -19,6 +19,7 @@
from __future__ import print_function
import logging
import os
from optparse import OptionGroup # pylint: disable=deprecated-module
from cryptography.hazmat.primitives import serialization
@@ -35,6 +36,8 @@ from ipaplatform.paths import paths
from ipalib import api, errors, x509
from ipaserver.install import certs, cainstance, installutils
logger = logging.getLogger(__name__)
class CACertManage(admintool.AdminTool):
command_name = 'ipa-cacert-manage'
@@ -154,7 +157,7 @@ class CACertManage(admintool.AdminTool):
if self.request_id is None:
raise admintool.ScriptError(
"CA certificate is not tracked by certmonger")
self.log.debug(
logger.debug(
"Found certmonger request id %r", self.request_id)
db = certs.CertDB(api.env.realm, nssdir=paths.PKI_TOMCAT_ALIAS_DIR)
@@ -309,7 +312,7 @@ class CACertManage(admintool.AdminTool):
def resubmit_request(self, ca='dogtag-ipa-ca-renew-agent', profile=''):
timeout = api.env.startup_timeout + 60
self.log.debug("resubmitting certmonger request '%s'", self.request_id)
logger.debug("resubmitting certmonger request '%s'", self.request_id)
certmonger.resubmit_request(self.request_id, ca=ca, profile=profile)
try:
state = certmonger.wait_for_request(self.request_id, timeout)
@@ -323,7 +326,7 @@ class CACertManage(admintool.AdminTool):
"Error resubmitting certmonger request '%s', "
"please check the request manually" % self.request_id)
self.log.debug("modifying certmonger request '%s'", self.request_id)
logger.debug("modifying certmonger request '%s'", self.request_id)
certmonger.modify(self.request_id,
ca='dogtag-ipa-ca-renew-agent',
profile='')

View File

@@ -20,6 +20,7 @@
from __future__ import print_function
import logging
import sys
import tempfile
from optparse import SUPPRESS_HELP # pylint: disable=deprecated-module
@@ -40,6 +41,8 @@ from ipaserver.install import dogtaginstance
from ipaserver.install import kra
from ipaserver.install.installutils import ReplicaConfig
logger = logging.getLogger(__name__)
class KRAInstall(admintool.AdminTool):
@@ -214,7 +217,7 @@ class KRAInstaller(KRAInstall):
try:
kra.install(api, config, self.options)
except:
self.log.error(dedent(self.FAIL_MESSAGE))
logger.error('%s', dedent(self.FAIL_MESSAGE))
raise
api.Backend.ldap2.disconnect()

View File

@@ -25,6 +25,7 @@
from __future__ import print_function
import logging
import os
import six
@@ -39,6 +40,8 @@ from ipaserver.install.upgradeinstance import IPAUpgrade
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
class LDAPUpdater(admintool.AdminTool):
command_name = 'ipa-ldap-updater'
@@ -69,7 +72,7 @@ class LDAPUpdater(admintool.AdminTool):
self.files = self.args
if not (self.files or options.schema_files):
self.log.info("To execute overall IPA upgrade please use "
logger.info("To execute overall IPA upgrade please use "
"'ipa-server-upgrade' command")
raise admintool.ScriptError("No update files or schema file were "
"specified")
@@ -117,9 +120,9 @@ class LDAPUpdater_Upgrade(LDAPUpdater):
raise admintool.ScriptError('IPA upgrade failed.', 1)
else:
if upgrade.modified:
self.log.info('Update complete')
logger.info('Update complete')
else:
self.log.info('Update complete, no data were modified')
logger.info('Update complete, no data were modified')
api.Backend.ldap2.disconnect()
@@ -149,8 +152,8 @@ class LDAPUpdater_NonUpgrade(LDAPUpdater):
modified = ld.update(self.files) or modified
if modified:
self.log.info('Update complete')
logger.info('Update complete')
else:
self.log.info('Update complete, no data were modified')
logger.info('Update complete, no data were modified')
api.Backend.ldap2.disconnect()

View File

@@ -20,6 +20,7 @@
import abc
import base64
import datetime
import logging
import os
import uuid
@@ -45,6 +46,8 @@ if six.PY3:
unicode = str
long = int
logger = logging.getLogger(__name__)
class ValidationError(Exception):
pass
@@ -538,9 +541,9 @@ class OTPTokenImport(admintool.AdminTool):
try:
api.Command.otptoken_add(keypkg.id, no_qrcode=True, **keypkg.options)
except Exception as e:
self.log.warning("Error adding token: %s", e)
logger.warning("Error adding token: %s", e)
else:
self.log.info("Added token: %s", keypkg.id)
logger.info("Added token: %s", keypkg.id)
keypkg.remove()
finally:
api.Backend.ldap2.disconnect()

View File

@@ -4,11 +4,15 @@
from __future__ import print_function
import logging
from ipalib import api
from ipaplatform.paths import paths
from ipapython.admintool import AdminTool
from ipaserver.install.krbinstance import KrbInstance, is_pkinit_enabled
logger = logging.getLogger(__name__)
class PKINITManage(AdminTool):
command_name = "ipa-pkinit-manage"
@@ -64,7 +68,7 @@ class PKINITManage(AdminTool):
krb.stop_tracking_certs()
except RuntimeError as e:
if ca_enabled:
self.log.warning(
logger.warning(
"Failed to stop tracking certificates: %s", e)
krb.enable_ssl()
@@ -76,8 +80,8 @@ class PKINITManage(AdminTool):
def enable(self):
if not api.Command.ca_is_enabled()['result']:
self.log.error("Cannot enable PKINIT in CA-less deployment")
self.log.error("Use ipa-server-certinstall to install KDC "
logger.error("Cannot enable PKINIT in CA-less deployment")
logger.error("Use ipa-server-certinstall to install KDC "
"certificate manually")
raise RuntimeError("Cannot enable PKINIT in CA-less deployment")

View File

@@ -21,6 +21,7 @@
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import shutil
import tempfile
@@ -52,6 +53,8 @@ from ipalib import errors
from ipaplatform.paths import paths
from ipalib.constants import DOMAIN_LEVEL_0
logger = logging.getLogger(__name__)
UNSUPPORTED_DOMAIN_LEVEL_TEMPLATE = """
Replica creation using '{command_name}' to generate replica file
is supported only in {domain_level}-level IPA domain.
@@ -242,7 +245,7 @@ class ReplicaPrepare(admintool.AdminTool):
if isinstance(e, installutils.HostLookupError):
if not options.ip_addresses:
if dns_container_exists(api.env.basedn):
self.log.info('You might use the --ip-address option '
logger.info('You might use the --ip-address option '
'to create a DNS entry if the DNS zone '
'is managed by IPA.')
raise
@@ -254,7 +257,7 @@ class ReplicaPrepare(admintool.AdminTool):
if options.ip_addresses:
if not dns_container_exists(api.env.basedn):
self.log.error(
logger.error(
"It is not possible to add a DNS record automatically "
"because DNS is not managed by IPA. Please create DNS "
"record manually and then omit --ip-address option.")
@@ -266,9 +269,9 @@ class ReplicaPrepare(admintool.AdminTool):
_host, zone = self.replica_fqdn.split('.', 1)
if not bindinstance.dns_zone_exists(zone, api=api):
self.log.error("DNS zone %s does not exist in IPA managed DNS "
logger.error("DNS zone %s does not exist in IPA managed DNS "
"server. Either create DNS zone or omit "
"--ip-address option." % zone)
"--ip-address option.", zone)
raise admintool.ScriptError("Cannot add DNS record")
self.http_pin = self.dirsrv_pin = None
@@ -312,7 +315,7 @@ class ReplicaPrepare(admintool.AdminTool):
options = self.options
super(ReplicaPrepare, self).run()
self.log.info("Preparing replica for %s from %s",
logger.info("Preparing replica for %s from %s",
self.replica_fqdn, api.env.host)
enable_replication_version_checking(
api.env.realm,
@@ -352,7 +355,7 @@ class ReplicaPrepare(admintool.AdminTool):
fd.write("%s\n" % (self.dirsrv_pin or ''))
if options.dirsrv_cert_files:
self.log.info("Copying SSL certificate for the Directory Server")
logger.info("Copying SSL certificate for the Directory Server")
self.copy_info_file(self.dirsrv_pkcs12_file.name, "dscert.p12")
else:
if ipautil.file_exists(options.ca_file):
@@ -366,15 +369,15 @@ class ReplicaPrepare(admintool.AdminTool):
raise admintool.ScriptError("Root CA PKCS#12 not "
"found in %s" % options.ca_file)
self.log.info(
logger.info(
"Creating SSL certificate for the Directory Server")
self.export_certdb("dscert", passwd_fname)
if not options.dirsrv_cert_files:
self.log.info(
logger.info(
"Creating SSL certificate for the dogtag Directory Server")
self.export_certdb("dogtagcert", passwd_fname)
self.log.info("Saving dogtag Directory Server port")
logger.info("Saving dogtag Directory Server port")
port_fname = os.path.join(
self.dir, "dogtag_directory_port.txt")
with open(port_fname, "w") as fd:
@@ -388,17 +391,17 @@ class ReplicaPrepare(admintool.AdminTool):
fd.write("%s\n" % (self.http_pin or ''))
if options.http_cert_files:
self.log.info("Copying SSL certificate for the Web Server")
logger.info("Copying SSL certificate for the Web Server")
self.copy_info_file(self.http_pkcs12_file.name, "httpcert.p12")
else:
self.log.info("Creating SSL certificate for the Web Server")
logger.info("Creating SSL certificate for the Web Server")
self.export_certdb("httpcert", passwd_fname)
self.log.info("Exporting RA certificate")
logger.info("Exporting RA certificate")
self.export_ra_pkcs12()
def copy_misc_files(self):
self.log.info("Copying additional files")
logger.info("Copying additional files")
cacert_filename = paths.CACERT_PEM
if ipautil.file_exists(cacert_filename):
@@ -406,13 +409,13 @@ class ReplicaPrepare(admintool.AdminTool):
self.copy_info_file(paths.IPA_DEFAULT_CONF, "default.conf")
def retrieve_ca_certs(self):
self.log.info("Retrieving CA certificates")
logger.info("Retrieving CA certificates")
dest = os.path.join(self.dir, "ca.crt")
install_ca_cert(api.Backend.ldap2, api.env.basedn,
api.env.realm, paths.IPA_CA_CRT, destfile=dest)
def save_config(self):
self.log.info("Finalizing configuration")
logger.info("Finalizing configuration")
config = SafeConfigParser()
config.add_section("realm")
@@ -430,7 +433,7 @@ class ReplicaPrepare(admintool.AdminTool):
replicafile = paths.REPLICA_INFO_TEMPLATE % self.replica_fqdn
encfile = "%s.gpg" % replicafile
self.log.info("Packaging replica information into %s", encfile)
logger.info("Packaging replica information into %s", encfile)
ipautil.run(
[paths.TAR, "cf", replicafile, "-C", self.top_dir, "realm_info"])
installutils.encrypt_file(
@@ -443,11 +446,11 @@ class ReplicaPrepare(admintool.AdminTool):
def add_dns_records(self):
options = self.options
self.log.info("Adding DNS records for %s", self.replica_fqdn)
logger.info("Adding DNS records for %s", self.replica_fqdn)
name, domain = self.replica_fqdn.split(".", 1)
for reverse_zone in options.reverse_zones:
self.log.info("Adding reverse zone %s", reverse_zone)
logger.info("Adding reverse zone %s", reverse_zone)
add_zone(reverse_zone)
for ip in options.ip_addresses:
@@ -461,7 +464,7 @@ class ReplicaPrepare(admintool.AdminTool):
if not options.no_reverse:
reverse_zone = bindinstance.find_reverse_zone(ip)
if reverse_zone is None:
self.log.warning(
logger.warning(
"Could not find any IPA managed reverse zone. "
"Not creating PTR records")
return
@@ -486,7 +489,7 @@ class ReplicaPrepare(admintool.AdminTool):
except exceptions:
return False
except Exception as e:
self.log.warning('Exception while waiting for DNS record: %s: %s',
logger.warning('Exception while waiting for DNS record: %s: %s',
type(e).__name__, e)
return True
@@ -499,10 +502,10 @@ class ReplicaPrepare(admintool.AdminTool):
replica_fqdn += '.'
if self.check_dns(replica_fqdn):
self.log.debug('%s A/AAAA record resolvable', replica_fqdn)
logger.debug('%s A/AAAA record resolvable', replica_fqdn)
return
self.log.info('Waiting for %s A or AAAA record to be resolvable',
logger.info('Waiting for %s A or AAAA record to be resolvable',
replica_fqdn)
print('This can be safely interrupted (Ctrl+C)')
@@ -510,9 +513,9 @@ class ReplicaPrepare(admintool.AdminTool):
while not self.check_dns(replica_fqdn):
time.sleep(1)
except KeyboardInterrupt:
self.log.info('Interrupted')
logger.info('Interrupted')
else:
self.log.debug('%s A/AAAA record resolvable', replica_fqdn)
logger.debug('%s A/AAAA record resolvable', replica_fqdn)
def copy_info_file(self, source, dest):
"""Copy a file into the info directory
@@ -521,7 +524,7 @@ class ReplicaPrepare(admintool.AdminTool):
:param dest: The destination file (relative to the info directory)
"""
dest_path = os.path.join(self.dir, dest)
self.log.debug('Copying %s to %s', source, dest_path)
logger.debug('Copying %s to %s', source, dest_path)
try:
shutil.copy(source, dest_path)
except IOError as e:
@@ -558,7 +561,7 @@ class ReplicaPrepare(admintool.AdminTool):
try:
db.export_pkcs12(pkcs12_fname, passwd_fname, nickname)
except ipautil.CalledProcessError as e:
self.log.info("error exporting Server certificate: %s", e)
logger.info("error exporting Server certificate: %s", e)
installutils.remove_file(pkcs12_fname)
installutils.remove_file(passwd_fname)
@@ -619,7 +622,8 @@ class ReplicaPrepare(admintool.AdminTool):
domain_level = dsinstance.get_domain_level(api)
if domain_level > DOMAIN_LEVEL_0:
self.log.error(
logger.error(
'%s',
UNSUPPORTED_DOMAIN_LEVEL_TEMPLATE.format(
command_name=self.command_name,
domain_level=DOMAIN_LEVEL_0,
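All of the hunks above apply the same two-step replacement: a logger is created once at module import time, and every former self.log call becomes a call on that logger with lazy %-style arguments. A minimal sketch of the pattern, with an invented class and message (illustrative only, not part of this commit):

import logging

# Named after the module's dotted path, so output can still be filtered
# per module even though the per-object self.log attribute is gone.
logger = logging.getLogger(__name__)


class ExamplePrepare(object):
    """Invented stand-in for an admin tool class."""

    def copy_info_file(self, source, dest):
        # Arguments are passed separately; the message is interpolated only
        # if the record is actually emitted.
        logger.debug('Copying %s to %s', source, dest)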

View File

@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import os
import shutil
import tempfile
@@ -57,6 +58,9 @@ try:
except ImportError:
adtrustinstance = None
logger = logging.getLogger(__name__)
def recursive_chown(path, uid, gid):
'''
Change ownership of all files and directories in a path.
@@ -102,7 +106,7 @@ def decrypt_file(tmpdir, filename, keyring):
class RemoveRUVParser(ldif.LDIFParser):
def __init__(self, input_file, writer, logger):
def __init__(self, input_file, writer):
ldif.LDIFParser.__init__(self, input_file)
self.writer = writer
self.log = logger
@@ -220,7 +224,7 @@ class Restore(admintool.AdminTool):
if not os.path.isabs(self.backup_dir):
self.backup_dir = os.path.join(paths.IPA_BACKUP_DIR, self.backup_dir)
self.log.info("Preparing restore from %s on %s",
logger.info("Preparing restore from %s on %s",
self.backup_dir, FQDN)
self.header = os.path.join(self.backup_dir, 'header')
@@ -281,8 +285,8 @@ class Restore(admintool.AdminTool):
raise admintool.ScriptError(
"Cannot restore a data backup into an empty system")
self.log.info("Performing %s restore from %s backup" %
(restore_type, self.backup_type))
logger.info("Performing %s restore from %s backup",
restore_type, self.backup_type)
if self.backup_host != FQDN:
raise admintool.ScriptError(
@@ -290,11 +294,11 @@ class Restore(admintool.AdminTool):
(FQDN, self.backup_host))
if self.backup_ipa_version != str(version.VERSION):
self.log.warning(
logger.warning(
"Restoring data from a different release of IPA.\n"
"Data is version %s.\n"
"Server is running %s." %
(self.backup_ipa_version, str(version.VERSION)))
"Server is running %s.",
self.backup_ipa_version, str(version.VERSION))
if (not options.unattended and
not user_input("Continue to restore?", False)):
raise admintool.ScriptError("Aborted")
@@ -349,30 +353,30 @@ class Restore(admintool.AdminTool):
not user_input("Restoring data will overwrite existing live data. Continue to restore?", False)):
raise admintool.ScriptError("Aborted")
self.log.info(
logger.info(
"Each master will individually need to be re-initialized or")
self.log.info(
logger.info(
"re-created from this one. The replication agreements on")
self.log.info(
logger.info(
"masters running IPA 3.1 or earlier will need to be manually")
self.log.info(
logger.info(
"re-enabled. See the man page for details.")
self.log.info("Disabling all replication.")
logger.info("Disabling all replication.")
self.disable_agreements()
if restore_type != 'FULL':
if not options.online:
self.log.info('Stopping Directory Server')
logger.info('Stopping Directory Server')
dirsrv.stop(capture_output=False)
else:
self.log.info('Starting Directory Server')
logger.info('Starting Directory Server')
dirsrv.start(capture_output=False)
else:
self.log.info('Stopping IPA services')
logger.info('Stopping IPA services')
result = run(['ipactl', 'stop'], raiseonerr=False)
if result.returncode not in [0, 6]:
self.log.warning('Stopping IPA failed: %s' % result.error_log)
logger.warning('Stopping IPA failed: %s', result.error_log)
self.restore_selinux_booleans()
@@ -394,7 +398,7 @@ class Restore(admintool.AdminTool):
if restore_type != 'FULL':
if not options.online:
self.log.info('Starting Directory Server')
logger.info('Starting Directory Server')
dirsrv.start(capture_output=False)
else:
# restore access control configuration
@@ -406,12 +410,12 @@ class Restore(admintool.AdminTool):
services.knownservices.pki_tomcatd.enable()
services.knownservices.pki_tomcatd.disable()
self.log.info('Restarting GSS-proxy')
logger.info('Restarting GSS-proxy')
gssproxy = services.service('gssproxy', api)
gssproxy.reload_or_restart()
self.log.info('Starting IPA services')
logger.info('Starting IPA services')
run(['ipactl', 'start'])
self.log.info('Restarting SSSD')
logger.info('Restarting SSSD')
sssd = services.service('sssd', api)
sssd.restart()
http.remove_httpd_ccaches()
@@ -421,7 +425,7 @@ class Restore(admintool.AdminTool):
try:
os.chdir(cwd)
except Exception as e:
self.log.error('Cannot change directory to %s: %s' % (cwd, e))
logger.error('Cannot change directory to %s: %s', cwd, e)
shutil.rmtree(self.top_dir)
@@ -459,7 +463,8 @@ class Restore(admintool.AdminTool):
try:
conn = self.get_connection()
except Exception as e:
self.log.error('Unable to get connection, skipping disabling agreements: %s' % e)
logger.error('Unable to get connection, skipping disabling '
'agreements: %s', e)
return
masters = []
dn = DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
@@ -479,7 +484,8 @@ class Restore(admintool.AdminTool):
repl = ReplicationManager(api.env.realm, master,
self.dirman_password)
except Exception as e:
self.log.critical("Unable to disable agreement on %s: %s" % (master, e))
logger.critical("Unable to disable agreement on %s: %s",
master, e)
continue
master_dn = DN(('cn', master), ('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
@@ -496,7 +502,8 @@ class Restore(admintool.AdminTool):
for rep in host_entries]
for host in hosts:
self.log.info('Disabling replication agreement on %s to %s' % (master, host))
logger.info('Disabling replication agreement on %s to %s',
master, host)
repl.disable_agreement(host)
if 'CA' in services_cns:
@@ -504,14 +511,16 @@ class Restore(admintool.AdminTool):
repl = get_cs_replication_manager(api.env.realm, master,
self.dirman_password)
except Exception as e:
self.log.critical("Unable to disable agreement on %s: %s" % (master, e))
logger.critical("Unable to disable agreement on %s: %s",
master, e)
continue
host_entries = repl.find_ipa_replication_agreements()
hosts = [rep.single_value.get('nsds5replicahost')
for rep in host_entries]
for host in hosts:
self.log.info('Disabling CA replication agreement on %s to %s' % (master, host))
logger.info('Disabling CA replication agreement on %s to '
'%s', master, host)
repl.hostnames = [master, host]
repl.disable_agreement(host)
@@ -522,7 +531,7 @@ class Restore(admintool.AdminTool):
If executed online create a task and wait for it to complete.
'''
self.log.info('Restoring from %s in %s' % (backend, instance))
logger.info('Restoring from %s in %s', backend, instance)
cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))
@@ -542,7 +551,7 @@ class Restore(admintool.AdminTool):
with open(ldiffile, 'wb') as out_file:
ldif_writer = ldif.LDIFWriter(out_file)
with open(srcldiffile, 'rb') as in_file:
ldif_parser = RemoveRUVParser(in_file, ldif_writer, self.log)
ldif_parser = RemoveRUVParser(in_file, ldif_writer)
ldif_parser.parse()
# Make sure the modified ldiffile is owned by DS_USER
@@ -565,10 +574,10 @@ class Restore(admintool.AdminTool):
try:
conn.add_entry(ent)
except Exception as e:
self.log.error("Unable to bind to LDAP server: %s" % e)
logger.error("Unable to bind to LDAP server: %s", e)
return
self.log.info("Waiting for LDIF to finish")
logger.info("Waiting for LDIF to finish")
wait_for_task(conn, dn)
else:
try:
@@ -582,7 +591,7 @@ class Restore(admintool.AdminTool):
'-n', backend]
result = run(args, raiseonerr=False)
if result.returncode != 0:
self.log.critical("ldif2db failed: %s" % result.error_log)
logger.critical("ldif2db failed: %s", result.error_log)
def bak2db(self, instance, backend, online=True):
@@ -598,9 +607,9 @@ class Restore(admintool.AdminTool):
to treat ipaca specially.
'''
if backend is not None:
self.log.info('Restoring %s in %s' % (backend, instance))
logger.info('Restoring %s in %s', backend, instance)
else:
self.log.info('Restoring %s' % instance)
logger.info('Restoring %s', instance)
cn = time.strftime('restore_%Y_%m_%d_%H_%M_%S')
@@ -626,7 +635,7 @@ class Restore(admintool.AdminTool):
raise admintool.ScriptError('Unable to bind to LDAP server: %s'
% e)
self.log.info("Waiting for restore to finish")
logger.info("Waiting for restore to finish")
wait_for_task(conn, dn)
else:
args = [paths.BAK2DB,
@@ -637,7 +646,7 @@ class Restore(admintool.AdminTool):
args.append(backend)
result = run(args, raiseonerr=False)
if result.returncode != 0:
self.log.critical("bak2db failed: %s" % result.error_log)
logger.critical("bak2db failed: %s", result.error_log)
def restore_default_conf(self):
@@ -659,8 +668,8 @@ class Restore(admintool.AdminTool):
result = run(args, raiseonerr=False)
if result.returncode != 0:
self.log.critical('Restoring %s failed: %s' %
(paths.IPA_DEFAULT_CONF, result.error_log))
logger.critical('Restoring %s failed: %s',
paths.IPA_DEFAULT_CONF, result.error_log)
os.chdir(cwd)
def remove_old_files(self):
@@ -673,15 +682,14 @@ class Restore(admintool.AdminTool):
shutil.rmtree(d)
except OSError as e:
if e.errno != 2: # 2: dir does not exist
self.log.warning("Could not remove directory: %s (%s)",
d, e)
logger.warning("Could not remove directory: %s (%s)", d, e)
for f in self.FILES_TO_BE_REMOVED:
try:
os.remove(f)
except OSError as e:
if e.errno != 2: # 2: file does not exist
self.log.warning("Could not remove file: %s (%s)", f, e)
logger.warning("Could not remove file: %s (%s)", f, e)
def file_restore(self, nologs=False):
'''
@@ -690,7 +698,7 @@ class Restore(admintool.AdminTool):
This MUST be done offline because we directly backup the 389-ds
databases.
'''
self.log.info("Restoring files")
logger.info("Restoring files")
cwd = os.getcwd()
os.chdir('/')
args = ['tar',
@@ -705,7 +713,7 @@ class Restore(admintool.AdminTool):
result = run(args, raiseonerr=False)
if result.returncode != 0:
self.log.critical('Restoring files failed: %s', result.error_log)
logger.critical('Restoring files failed: %s', result.error_log)
os.chdir(cwd)
@@ -750,7 +758,7 @@ class Restore(admintool.AdminTool):
encrypt = True
if encrypt:
self.log.info('Decrypting %s' % filename)
logger.info('Decrypting %s', filename)
filename = decrypt_file(self.dir, filename, keyring)
os.chdir(self.dir)
@@ -792,20 +800,20 @@ class Restore(admintool.AdminTool):
try:
pent = pwd.getpwnam(constants.PKI_USER)
except KeyError:
self.log.debug("No %s user exists, skipping CA directory creation",
logger.debug("No %s user exists, skipping CA directory creation",
constants.PKI_USER)
return
self.log.debug('Creating log directories for dogtag')
logger.debug('Creating log directories for dogtag')
for dir in dirs:
try:
self.log.debug('Creating %s' % dir)
logger.debug('Creating %s', dir)
os.mkdir(dir)
os.chmod(dir, 0o770)
os.chown(dir, pent.pw_uid, pent.pw_gid)
tasks.restore_context(dir)
except Exception as e:
# This isn't so fatal as to side-track the restore
self.log.error('Problem with %s: %s' % (dir, e))
logger.error('Problem with %s: %s', dir, e)
def restore_selinux_booleans(self):
bools = dict(constants.SELINUX_BOOLEAN_HTTPD)
@@ -813,13 +821,13 @@ class Restore(admintool.AdminTool):
if adtrustinstance:
bools.update(constants.SELINUX_BOOLEAN_ADTRUST)
else:
self.log.error(
logger.error(
'The AD trust package was not found, '
'not setting SELinux booleans.')
try:
tasks.set_selinux_booleans(bools)
except ipapython.errors.SetseboolError as e:
self.log.error('%s', e)
logger.error('%s', e)
def cert_restore_prepare(self):
cainstance.CAInstance().stop_tracking_certificates()
@@ -838,7 +846,7 @@ class Restore(admintool.AdminTool):
try:
ipautil.backup_file(filename)
except OSError as e:
self.log.error("Failed to backup %s: %s" % (filename, e))
logger.error("Failed to backup %s: %s", filename, e)
tasks.remove_ca_certs_from_systemwide_ca_store()
@@ -846,7 +854,7 @@ class Restore(admintool.AdminTool):
try:
update_ipa_nssdb()
except RuntimeError as e:
self.log.error("%s", e)
logger.error("%s", e)
tasks.reload_systemwide_ca_store()
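The ipa-restore hunks above also show the other half of the cleanup: helper classes such as RemoveRUVParser no longer accept a logger argument, and messages that were eagerly built with % now hand their arguments to the logger instead. A hedged sketch with invented names (not part of this commit):

import logging

logger = logging.getLogger(__name__)


class ExampleParser(object):
    # Before this kind of change the constructor also took a logger
    # parameter and stored it as self.log; callers now pass only the data
    # they own.
    def __init__(self, input_file, writer):
        self.input_file = input_file
        self.writer = writer

    def handle(self, instance):
        # eager (old): self.log.info('Restoring %s' % instance)
        # lazy  (new): formatting is deferred to the logging framework
        logger.info('Restoring %s', instance)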

View File

@@ -2,12 +2,16 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
import logging
from ipalib import api
from ipaplatform.paths import paths
from ipapython import admintool
from ipaserver.install import installutils
from ipaserver.install import server
logger = logging.getLogger(__name__)
class ServerUpgrade(admintool.AdminTool):
log_file_name = paths.IPAUPGRADE_LOG
@@ -50,7 +54,7 @@ class ServerUpgrade(admintool.AdminTool):
def handle_error(self, exception):
if not isinstance(exception, SystemExit):
# do not log this message when ipa is not installed
self.log.error("IPA server upgrade failed: Inspect "
logger.error("IPA server upgrade failed: Inspect "
"/var/log/ipaupgrade.log and run command "
"ipa-server-upgrade manually.")
return installutils.handle_error(exception, self.log_file_name)

View File

@@ -17,6 +17,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import gssapi
import sys
@@ -33,6 +35,8 @@ from ipaserver.install import replication, installutils
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
DEFAULT_TRUST_VIEW_NAME = u'Default Trust View'
@@ -180,10 +184,10 @@ class WinsyncMigrate(admintool.AdminTool):
**kwargs
)
except Exception as e:
self.log.warning("Migration failed: %s (%s)"
% (user_identifier, str(e)))
logger.warning("Migration failed: %s (%s)",
user_identifier, str(e))
else:
self.log.debug("Migrated: %s" % user_identifier)
logger.debug("Migrated: %s", user_identifier)
def find_winsync_users(self):
"""
@@ -198,7 +202,7 @@ class WinsyncMigrate(admintool.AdminTool):
paged_search=True)
for entry in entries:
self.log.debug("Discovered entry: %s" % entry)
logger.debug("Discovered entry: %s", entry)
return entries
@@ -328,10 +332,11 @@ class WinsyncMigrate(admintool.AdminTool):
)
def warn_passsync(self):
self.log.warning("Migration completed. Please note that if PassSync "
logger.warning("Migration completed. Please note that if PassSync "
"was configured on the given Active Directory server, "
"it needs to be manually removed, otherwise it may try "
"to reset password for accounts that are no longer existent.")
"to reset password for accounts that are no longer "
"existent.")
@classmethod
def main(cls, argv):

View File

@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import os
import pwd
import shutil
@@ -38,7 +39,8 @@ from ipaserver.install import installutils
from ipaserver.install import ldapupdate
from ipaserver.install.dogtaginstance import DogtagInstance
from ipaserver.plugins import ldap2
from ipapython.ipa_log_manager import log_mgr
logger = logging.getLogger(__name__)
# When IPA is installed with DNS support, this CNAME should hold all IPA
# replicas with KRA configured
@@ -72,7 +74,6 @@ class KRAInstance(DogtagInstance):
)
self.basedn = DN(('o', 'kra'), ('o', 'ipaca'))
self.log = log_mgr.get_logger(self)
def configure_instance(self, realm_name, host_name, dm_password,
admin_password, pkcs12_info=None, master_host=None,
@@ -293,7 +294,7 @@ class KRAInstance(DogtagInstance):
os.remove(cfg_file)
shutil.move(paths.KRA_BACKUP_KEYS_P12, paths.KRACERT_P12)
self.log.debug("completed creating KRA instance")
logger.debug("completed creating KRA instance")
def __create_kra_agent(self):
"""

View File

@@ -23,6 +23,7 @@
# save undo files?
import base64
import logging
import sys
import uuid
import platform
@@ -41,11 +42,12 @@ from ipalib import api, create_api
from ipalib import constants
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipapython.ipa_log_manager import log_mgr
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
UPDATES_DIR=paths.UPDATES_DIR
UPDATE_SEARCH_TIME_LIMIT = 30 # seconds
@@ -250,7 +252,6 @@ class LDAPUpdate(object):
update format.
'''
log_mgr.get_logger(self, True)
self.sub_dict = sub_dict
self.dm_password = dm_password
self.conn = None
@@ -548,8 +549,8 @@ class LDAPUpdate(object):
nsIndexAttribute=[attribute],
)
self.debug("Creating task to index attribute: %s", attribute)
self.debug("Task id: %s", dn)
logger.debug("Creating task to index attribute: %s", attribute)
logger.debug("Task id: %s", dn)
self.conn.add_entry(e)
@@ -571,10 +572,10 @@ class LDAPUpdate(object):
try:
entry = self.conn.get_entry(dn, attrlist)
except errors.NotFound as e:
self.error("Task not found: %s", dn)
logger.error("Task not found: %s", dn)
return
except errors.DatabaseError as e:
self.error("Task lookup failure %s", e)
logger.error("Task lookup failure %s", e)
return
status = entry.single_value.get('nstaskstatus')
@@ -584,10 +585,10 @@ class LDAPUpdate(object):
continue
if status.lower().find("finished") > -1:
self.debug("Indexing finished")
logger.debug("Indexing finished")
break
self.debug("Indexing in progress")
logger.debug("Indexing in progress")
time.sleep(1)
return
@@ -663,54 +664,76 @@ class LDAPUpdate(object):
entry_values = entry.raw.get(attr, [])
if action == 'remove':
self.debug("remove: '%s' from %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr,entry_values))
logger.debug("remove: '%s' from %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
try:
entry_values.remove(update_value)
except ValueError:
self.debug(
logger.debug(
"remove: '%s' not in %s",
safe_output(attr, update_value), attr)
else:
entry.raw[attr] = entry_values
self.debug('remove: updated value %s', safe_output(
logger.debug('remove: updated value %s', safe_output(
attr, entry_values))
elif action == 'add':
self.debug("add: '%s' to %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr, entry_values))
logger.debug("add: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Remove it, ignoring errors so we can blindly add it later
try:
entry_values.remove(update_value)
except ValueError:
pass
entry_values.append(update_value)
self.debug('add: updated value %s', safe_output(attr, entry_values))
logger.debug('add: updated value %s',
safe_output(attr, entry_values))
entry.raw[attr] = entry_values
elif action == 'addifnew':
self.debug("addifnew: '%s' to %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr, entry_values))
logger.debug("addifnew: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Only add the attribute if it doesn't exist. Only works
# with single-value attributes. Entry must exist.
if entry.get('objectclass') and len(entry_values) == 0:
entry_values.append(update_value)
self.debug('addifnew: set %s to %s', attr, safe_output(attr, entry_values))
logger.debug('addifnew: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values
elif action == 'addifexist':
self.debug("addifexist: '%s' to %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr, entry_values))
logger.debug("addifexist: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Only add the attribute if the entry doesn't exist. We
# determine this based on whether it has an objectclass
if entry.get('objectclass'):
entry_values.append(update_value)
self.debug('addifexist: set %s to %s', attr, safe_output(attr, entry_values))
logger.debug('addifexist: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values
elif action == 'only':
self.debug("only: set %s to '%s', current value %s", attr, safe_output(attr, update_value), safe_output(attr, entry_values))
logger.debug("only: set %s to '%s', current value %s",
attr,
safe_output(attr, update_value),
safe_output(attr, entry_values))
if only.get(attr):
entry_values.append(update_value)
else:
entry_values = [update_value]
only[attr] = True
entry.raw[attr] = entry_values
self.debug('only: updated value %s', safe_output(attr, entry_values))
logger.debug('only: updated value %s',
safe_output(attr, entry_values))
elif action == 'onlyifexist':
self.debug("onlyifexist: '%s' to %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr, entry_values))
logger.debug("onlyifexist: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Only set the attribute if the entry exist's. We
# determine this based on whether it has an objectclass
if entry.get('objectclass'):
@@ -719,7 +742,8 @@ class LDAPUpdate(object):
else:
entry_values = [update_value]
only[attr] = True
self.debug('onlyifexist: set %s to %s', attr, safe_output(attr, entry_values))
logger.debug('onlyifexist: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values
elif action == 'deleteentry':
# skip this update type, it occurs in __delete_entries()
@@ -731,24 +755,26 @@ class LDAPUpdate(object):
try:
entry_values.remove(old)
except ValueError:
self.debug('replace: %s not found, skipping', safe_output(attr, old))
logger.debug('replace: %s not found, skipping',
safe_output(attr, old))
else:
entry_values.append(new)
self.debug('replace: updated value %s', safe_output(attr, entry_values))
logger.debug('replace: updated value %s',
safe_output(attr, entry_values))
entry.raw[attr] = entry_values
return entry
def print_entity(self, e, message=None):
"""The entity object currently lacks a str() method"""
self.debug("---------------------------------------------")
logger.debug("---------------------------------------------")
if message:
self.debug("%s", message)
self.debug("dn: %s", e.dn)
logger.debug("%s", message)
logger.debug("dn: %s", e.dn)
for a, value in e.raw.items():
self.debug('%s:', a)
logger.debug('%s:', a)
for l in value:
self.debug("\t%s", safe_output(a, l))
logger.debug("\t%s", safe_output(a, l))
def _update_record(self, update):
found = False
@@ -763,15 +789,15 @@ class LDAPUpdate(object):
raise BadSyntax("More than 1 entry returned on a dn search!? %s" % new_entry.dn)
entry = e[0]
found = True
self.debug("Updating existing entry: %s", entry.dn)
logger.debug("Updating existing entry: %s", entry.dn)
except errors.NotFound:
# Doesn't exist, start with the default entry
entry = new_entry
self.debug("New entry: %s", entry.dn)
logger.debug("New entry: %s", entry.dn)
except errors.DatabaseError:
# Doesn't exist, start with the default entry
entry = new_entry
self.debug("New entry, using default value: %s", entry.dn)
logger.debug("New entry, using default value: %s", entry.dn)
self.print_entity(entry, "Initial value")
@@ -796,13 +822,13 @@ class LDAPUpdate(object):
except errors.NotFound:
# parent entry of the added entry does not exist
# this may not be an error (e.g. entries in NIS container)
self.error("Parent DN of %s may not exist, cannot "
logger.error("Parent DN of %s may not exist, cannot "
"create the entry", entry.dn)
return
added = True
self.modified = True
except Exception as e:
self.error("Add failure %s", e)
logger.error("Add failure %s", e)
else:
# Update LDAP
try:
@@ -812,19 +838,19 @@ class LDAPUpdate(object):
safe_changes = []
for (type, attr, values) in changes:
safe_changes.append((type, attr, safe_output(attr, values)))
self.debug("%s" % safe_changes)
self.debug("Updated %d" % updated)
logger.debug("%s", safe_changes)
logger.debug("Updated %d", updated)
if updated:
self.conn.update_entry(entry)
self.debug("Done")
logger.debug("Done")
except errors.EmptyModlist:
self.debug("Entry already up-to-date")
logger.debug("Entry already up-to-date")
updated = False
except errors.DatabaseError as e:
self.error("Update failed: %s", e)
logger.error("Update failed: %s", e)
updated = False
except errors.ACIError as e:
self.error("Update failed: %s", e)
logger.error("Update failed: %s", e)
updated = False
if updated:
@@ -844,14 +870,14 @@ class LDAPUpdate(object):
dn = updates['dn']
try:
self.debug("Deleting entry %s", dn)
logger.debug("Deleting entry %s", dn)
self.conn.delete_entry(dn)
self.modified = True
except errors.NotFound as e:
self.debug("%s did not exist:%s", dn, e)
logger.debug("%s did not exist:%s", dn, e)
self.modified = True
except errors.DatabaseError as e:
self.error("Delete failed: %s", e)
logger.error("Delete failed: %s", e)
def get_all_files(self, root, recursive=False):
"""Get all update files"""
@@ -866,7 +892,7 @@ class LDAPUpdate(object):
return f
def _run_update_plugin(self, plugin_name):
self.log.debug("Executing upgrade plugin: %s", plugin_name)
logger.debug("Executing upgrade plugin: %s", plugin_name)
restart_ds, updates = self.api.Updater[plugin_name]()
if updates:
self._run_updates(updates)
@@ -912,10 +938,10 @@ class LDAPUpdate(object):
for f in upgrade_files:
try:
self.debug("Parsing update file '%s'" % f)
logger.debug("Parsing update file '%s'", f)
data = self.read_file(f)
except Exception as e:
self.error("error reading update file '%s'", f)
logger.error("error reading update file '%s'", f)
raise RuntimeError(e)
self.parse_update_file(f, data, all_updates)
@@ -933,5 +959,5 @@ class LDAPUpdate(object):
self.conn = None
def restart_ds(self):
self.log.debug('Restarting directory server to apply updates')
logger.debug('Restarting directory server to apply updates')
installutils.restart_dirsrv()
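In ldapupdate.py the removed log_mgr.get_logger(self, True) call is what had been providing the self.debug/self.error shortcuts visible in the old lines; each shortcut is replaced by the module logger at the same severity. A short illustrative sketch with an invented class (not part of this commit):

import logging

logger = logging.getLogger(__name__)


class ExampleUpdate(object):
    """Invented class standing in for the self.debug/self.error call sites."""

    def apply(self, entry, updated):
        logger.debug("Updating existing entry: %s", entry)
        if not updated:
            # same severity as the former self.error() call
            logger.error("Update failed: %s", entry)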

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import Registry, errors
from ipalib import Updater
from ipapython.dn import DN
@@ -24,6 +26,8 @@ from ipapython.ipa_log_manager import root_logger
from ipaserver.install import sysupgrade
from ipaserver.install.adtrustinstance import ADTRUSTInstance
logger = logging.getLogger(__name__)
register = Registry()
DEFAULT_ID_RANGE_SIZE = 200000
@@ -146,7 +150,7 @@ class update_default_trust_view(Updater):
# First, see if trusts are enabled on the server
if not self.api.Command.adtrust_is_enabled()['result']:
self.log.debug('AD Trusts are not enabled on this server')
logger.debug('AD Trusts are not enabled on this server')
return False, []
# Second, make sure the Default Trust View does not exist yet
@@ -155,7 +159,7 @@ class update_default_trust_view(Updater):
except errors.NotFound:
pass
else:
self.log.debug('Default Trust View already present on this server')
logger.debug('Default Trust View already present on this server')
return False, []
# We have a server with AD trust support without Default Trust View.
@@ -200,7 +204,7 @@ class update_sigden_extdom_broken_config(Updater):
try:
entry = ldap.get_entry(dn, attrs_list=[basedn_attr])
except errors.NotFound:
self.log.debug("configuration for %s not found, skipping", dn)
logger.debug("configuration for %s not found, skipping", dn)
else:
configured_suffix = entry.single_value.get(basedn_attr)
if configured_suffix is None:
@@ -212,19 +216,19 @@ class update_sigden_extdom_broken_config(Updater):
elif configured_suffix == "$SUFFIX":
# configured value is wrong, fix it
entry.single_value[basedn_attr] = str(self.api.env.basedn)
self.log.debug("updating attribute %s of %s to correct "
"value %s", basedn_attr, dn,
self.api.env.basedn)
logger.debug("updating attribute %s of %s to correct "
"value %s",
basedn_attr, dn, self.api.env.basedn)
ldap.update_entry(entry)
modified = True
else:
self.log.debug("configured basedn for %s is okay", dn)
logger.debug("configured basedn for %s is okay", dn)
return modified
def execute(self, **options):
if sysupgrade.get_upgrade_state('sidgen', 'config_basedn_updated'):
self.log.debug("Already done, skipping")
logger.debug("Already done, skipping")
return False, ()
restart = False
@@ -250,7 +254,7 @@ class update_sids(Updater):
ldap = self.api.Backend.ldap2
if sysupgrade.get_upgrade_state('sidgen', 'update_sids') is not True:
self.log.debug("SIDs do not need to be generated")
logger.debug("SIDs do not need to be generated")
return False, ()
# check if IPA domain for AD trust has been created, and if we need to
@@ -264,7 +268,7 @@ class update_sids(Updater):
try:
entry = ldap.get_entry(domain_IPA_AD_dn, attrs_list=[attr_name])
except errors.NotFound:
self.log.debug("IPA domain object %s is not configured",
logger.debug("IPA domain object %s is not configured",
domain_IPA_AD_dn)
sysupgrade.set_upgrade_state('sidgen', 'update_sids', False)
return False, ()
@@ -286,9 +290,9 @@ class update_sids(Updater):
try:
ldap.add_entry(task_entry)
except errors.DuplicateEntry:
self.log.debug("sidgen task already created")
logger.debug("sidgen task already created")
else:
self.log.debug("sidgen task has been created")
logger.debug("sidgen task has been created")
# we have to check all trusts domains which may been affected by the
# bug. Symptom is missing 'ipaNTSecurityIdentifier' attribute
@@ -307,11 +311,11 @@ class update_sids(Updater):
pass
else:
if truncated:
self.log.warning("update_sids: Search results were truncated")
logger.warning("update_sids: Search results were truncated")
for entry in trust_domain_entries:
domain = entry.single_value["cn"]
self.log.error(
logger.error(
"Your trust to %s is broken. Please re-create it by "
"running 'ipa trust-add' again.", domain)
@@ -331,7 +335,7 @@ class update_tdo_gidnumber(Updater):
# First, see if trusts are enabled on the server
if not self.api.Command.adtrust_is_enabled()['result']:
self.log.debug('AD Trusts are not enabled on this server')
logger.debug('AD Trusts are not enabled on this server')
return False, []
# Read the gidnumber of the fallback group
@@ -343,13 +347,13 @@ class update_tdo_gidnumber(Updater):
entry = ldap.get_entry(dn, ['gidnumber'])
gidNumber = entry.get('gidnumber')
except errors.NotFound:
self.log.error("{0} not found".format(
ADTRUSTInstance.FALLBACK_GROUP_NAME))
logger.error("%s not found",
ADTRUSTInstance.FALLBACK_GROUP_NAME)
return False, ()
if not gidNumber:
self.log.error("{0} does not have a gidnumber".format(
ADTRUSTInstance.FALLBACK_GROUP_NAME))
logger.error("%s does not have a gidnumber",
ADTRUSTInstance.FALLBACK_GROUP_NAME)
return False, ()
# For each trusted domain object, add gidNumber
@@ -366,14 +370,14 @@ class update_tdo_gidnumber(Updater):
try:
tdo['gidnumber'] = gidNumber
ldap.update_entry(tdo)
self.log.debug("Added gidnumber {0} to {1}".format(
gidNumber, tdo.dn))
logger.debug("Added gidnumber %s to %s",
gidNumber, tdo.dn)
except Exception:
self.log.warning(
"Failed to add gidnumber to {0}".format(tdo.dn))
logger.warning(
"Failed to add gidnumber to %s", tdo.dn)
except errors.NotFound:
self.log.debug("No trusted domain object to update")
logger.debug("No trusted domain object to update")
return False, ()
return False, ()
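Several hunks in this updater also convert messages built with str.format() into lazy logger arguments. A tiny sketch of that conversion with an invented helper (not part of this commit):

import logging

logger = logging.getLogger(__name__)


def report_missing_group(group_name):
    # old: logger.error("{0} not found".format(group_name))
    # new: %-placeholders plus separate arguments, formatted on demand
    logger.error("%s not found", group_name)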

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipaserver.install import installutils, cainstance
from ipalib import errors
from ipalib import Updater
@@ -25,6 +27,8 @@ from ipalib.plugable import Registry
from ipaplatform.paths import paths
from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry()
@register()
@@ -36,7 +40,7 @@ class update_ca_renewal_master(Updater):
def execute(self, **options):
ca = cainstance.CAInstance(self.api.env.realm)
if not ca.is_configured():
self.debug("CA is not configured on this host")
logger.debug("CA is not configured on this host")
return False, []
ldap = self.api.Backend.ldap2
@@ -50,7 +54,7 @@ class update_ca_renewal_master(Updater):
except errors.NotFound:
pass
else:
self.debug("found CA renewal master %s", entries[0].dn[1].value)
logger.debug("found CA renewal master %s", entries[0].dn[1].value)
master = False
updates = []
@@ -78,11 +82,11 @@ class update_ca_renewal_master(Updater):
}
request_id = certmonger.get_request_id(criteria)
if request_id is not None:
self.debug("found certmonger request for RA cert")
logger.debug("found certmonger request for RA cert")
ca_name = certmonger.get_request_value(request_id, 'ca-name')
if ca_name is None:
self.warning(
logger.warning(
"certmonger request for RA cert is missing ca_name, "
"assuming local CA is renewal slave")
return False, []
@@ -95,12 +99,12 @@ class update_ca_renewal_master(Updater):
elif ca_name == 'dogtag-ipa-ca-renew-agent':
return False, []
else:
self.warning(
logger.warning(
"certmonger request for RA cert has unknown ca_name '%s', "
"assuming local CA is renewal slave", ca_name)
return False, []
else:
self.debug("certmonger request for RA cert not found")
logger.debug("certmonger request for RA cert not found")
config = installutils.get_directive(
paths.CA_CS_CFG_PATH, 'subsystem.select', '=')
@@ -110,7 +114,7 @@ class update_ca_renewal_master(Updater):
elif config == 'Clone':
return False, []
else:
self.warning(
logger.warning(
"CS.cfg has unknown subsystem.select value '%s', "
"assuming local CA is renewal slave", config)
return (False, False, [])

View File

@@ -19,6 +19,8 @@
from __future__ import absolute_import
import logging
import dns.exception
import re
import traceback
@@ -35,6 +37,8 @@ from ipaserver.install import sysupgrade
from ipaserver.install.bindinstance import ensure_dnsserver_container_exists
from ipaserver.plugins.dns import dns_container_exists
logger = logging.getLogger(__name__)
register = Registry()
@@ -65,8 +69,8 @@ class DNSUpdater(Updater):
@property
def ldif_writer(self):
if not self._ldif_writer:
self.log.info('Original zones will be saved in LDIF format in '
'%s file' % self.backup_path)
logger.info('Original zones will be saved in LDIF format in '
'%s file', self.backup_path)
self._ldif_writer = LDIFWriter(open(self.backup_path, 'w'))
return self._ldif_writer
@@ -137,7 +141,7 @@ class update_ipaconfigstring_dnsversion_to_ipadnsversion(Updater):
# version data are already migrated
return False, []
self.log.debug('Migrating DNS ipaConfigString to ipaDNSVersion')
logger.debug('Migrating DNS ipaConfigString to ipaDNSVersion')
container_entry['objectclass'].append('ipadnscontainer')
version = 0
for config_option in container_entry.get("ipaConfigString", []):
@@ -146,12 +150,12 @@ class update_ipaconfigstring_dnsversion_to_ipadnsversion(Updater):
if matched:
version = int(matched.group("version"))
else:
self.log.error(
logger.error(
'Failed to parse DNS version from ipaConfigString, '
'defaulting to version %s', version)
container_entry['ipadnsversion'] = version
ldap.update_entry(container_entry)
self.log.debug('ipaDNSVersion = %s', version)
logger.debug('ipaDNSVersion = %s', version)
return False, []
@@ -189,7 +193,7 @@ class update_dnszones(Updater):
try:
zones = self.api.Command.dnszone_find(all=True)['result']
except errors.NotFound:
self.log.debug('No DNS zone to update found')
logger.debug('No DNS zone to update found')
return False, []
for zone in zones:
@@ -284,7 +288,7 @@ class update_master_to_dnsforwardzones(DNSUpdater):
# no upgrade is required
return False, []
self.log.debug('Updating forward zones')
logger.debug('Updating forward zones')
# update the DNSVersion, following upgrade can be executed only once
self.api.Command['dnsconfig_mod'](ipadnsversion=1)
@@ -311,7 +315,7 @@ class update_master_to_dnsforwardzones(DNSUpdater):
pass
if not zones:
self.log.debug('No DNS zone to update found')
logger.debug('No DNS zone to update found')
return False, []
zones_to_transform = []
@@ -326,27 +330,27 @@ class update_master_to_dnsforwardzones(DNSUpdater):
zones_to_transform.append(zone)
if zones_to_transform:
self.log.info('Zones with specified forwarders with policy different'
' than none will be transformed to forward zones.')
logger.info('Zones with specified forwarders with policy '
'different than none will be transformed to forward '
'zones.')
# update
for zone in zones_to_transform:
try:
self.backup_zone(zone)
except Exception:
self.log.error('Unable to create backup for zone, '
logger.error('Unable to create backup for zone, '
'terminating zone upgrade')
self.log.error(traceback.format_exc())
logger.error("%s", traceback.format_exc())
return False, []
# delete master zone
try:
self.api.Command['dnszone_del'](zone['idnsname'])
except Exception as e:
self.log.error('Transform to forwardzone terminated: '
'removing zone %s failed (%s)' % (
logger.error('Transform to forwardzone terminated: '
'removing zone %s failed (%s)',
zone['idnsname'][0], e)
)
self.log.error(traceback.format_exc())
logger.error("%s", traceback.format_exc())
continue
# create forward zone
@@ -358,11 +362,11 @@ class update_master_to_dnsforwardzones(DNSUpdater):
'skip_overlap_check': True,
}
self.api.Command['dnsforwardzone_add'](zone['idnsname'][0], **kw)
except Exception as e:
self.log.error('Transform to forwardzone terminated: creating '
'forwardzone %s failed' %
except Exception:
logger.error('Transform to forwardzone terminated: '
'creating forwardzone %s failed',
zone['idnsname'][0])
self.log.error(traceback.format_exc())
logger.error("%s", traceback.format_exc())
continue
# create permission if original zone has one
@@ -370,12 +374,12 @@ class update_master_to_dnsforwardzones(DNSUpdater):
try:
perm_name = self.api.Command['dnsforwardzone_add_permission'](
zone['idnsname'][0])['value']
except Exception as e:
self.log.error('Transform to forwardzone terminated: '
'Adding managed by permission to forward zone'
' %s failed' % zone['idnsname'])
self.log.error(traceback.format_exc())
self.log.info('Zone %s was transformed to forward zone '
except Exception:
logger.error('Transform to forwardzone terminated: '
'Adding managed by permission to forward '
'zone %s failed', zone['idnsname'])
logger.error("%s", traceback.format_exc())
logger.info('Zone %s was transformed to forward zone '
' without managed permissions',
zone['idnsname'][0])
continue
@@ -388,17 +392,19 @@ class update_master_to_dnsforwardzones(DNSUpdater):
try:
self.api.Command['permission_add_member'](perm_name,
privilege=privileges)
except Exception as e:
self.log.error('Unable to restore privileges for '
'permission %s, for zone %s'
% (perm_name, zone['idnsname']))
self.log.error(traceback.format_exc())
self.log.info('Zone %s was transformed to forward zone'
' without restored privileges',
except Exception:
logger.error('Unable to restore privileges '
'for permission %s, for zone %s',
perm_name, zone['idnsname'])
logger.error("%s", traceback.format_exc())
logger.info('Zone %s was transformed to '
'forward zone without restored '
'privileges',
zone['idnsname'][0])
continue
self.log.debug('Zone %s was successfully transformed to forward zone',
logger.debug('Zone %s was successfully transformed to forward '
'zone',
zone['idnsname'][0])
return False, []
@@ -437,18 +443,18 @@ class update_dnsforward_emptyzones(DNSUpdater):
continue
if not logged_once:
self.log.info('Forward policy for zones conflicting with '
'automatic empty zones will be changed to '
'"only"')
logger.info('Forward policy for zones conflicting with '
'automatic empty zones will be changed to "only"')
logged_once = True
# backup
try:
self.backup_zone(zone)
except Exception:
self.log.error('Unable to create backup for zone %s, '
'terminating zone upgrade', zone['idnsname'][0])
self.log.error(traceback.format_exc())
logger.error('Unable to create backup for zone %s, '
'terminating zone upgrade',
zone['idnsname'][0])
logger.error("%s", traceback.format_exc())
continue
# change forward policy
@@ -458,13 +464,13 @@ class update_dnsforward_emptyzones(DNSUpdater):
idnsforwardpolicy=u'only'
)
except Exception as e:
self.log.error('Forward policy update for zone %s failed '
'(%s)' % (zone['idnsname'][0], e))
self.log.error(traceback.format_exc())
logger.error('Forward policy update for zone %s failed '
'(%s)', zone['idnsname'][0], e)
logger.error("%s", traceback.format_exc())
continue
self.log.debug('Zone %s was successfully modified to use '
'forward policy "only"', zone['idnsname'][0])
logger.debug('Zone %s was successfully modified to use forward '
'policy "only"', zone['idnsname'][0])
def update_global_ldap_forwarder(self):
config = self.api.Command['dnsconfig_show'](all=True,
@@ -473,7 +479,7 @@ class update_dnsforward_emptyzones(DNSUpdater):
config.get('idnsforwardpolicy', [u'first'])[0] == u'first'
and config.get('idnsforwarders', [])
):
self.log.info('Global forward policy in LDAP for all servers will '
logger.info('Global forward policy in LDAP for all servers will '
'be changed to "only" to avoid conflicts with '
'automatic empty zones')
self.backup_zone(config)
@@ -485,7 +491,7 @@ class update_dnsforward_emptyzones(DNSUpdater):
# forwardzones already use new semantics, no upgrade is required
return False, []
self.log.debug('Updating forwarding policies in LDAP '
logger.debug('Updating forwarding policies in LDAP '
'to avoid conflicts with automatic empty zones')
# update the DNSVersion, following upgrade can be executed only once
self.api.Command['dnsconfig_mod'](ipadnsversion=2)
@@ -495,7 +501,7 @@ class update_dnsforward_emptyzones(DNSUpdater):
if dnsutil.has_empty_zone_addresses(self.api.env.host):
self.update_global_ldap_forwarder()
except dns.exception.DNSException as ex:
self.log.error('Skipping update of global DNS forwarder in LDAP: '
logger.error('Skipping update of global DNS forwarder in LDAP: '
'Unable to determine if local server is using an '
'IP address belonging to an automatic empty zone. '
'Consider changing forwarding policy to "only". '
@@ -513,33 +519,33 @@ class update_dnsserver_configuration_into_ldap(DNSUpdater):
def execute(self, **options):
ldap = self.api.Backend.ldap2
if sysupgrade.get_upgrade_state('dns', 'server_config_to_ldap'):
self.log.debug('upgrade is not needed')
logger.debug('upgrade is not needed')
return False, []
dns_container_dn = DN(self.api.env.container_dns, self.api.env.basedn)
try:
ldap.get_entry(dns_container_dn)
except errors.NotFound:
self.log.debug('DNS container not found, nothing to upgrade')
logger.debug('DNS container not found, nothing to upgrade')
sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True)
return False, []
result = self.api.Command.server_show(self.api.env.host)['result']
if not 'DNS server' in result.get('enabled_role_servrole', []):
self.log.debug('This server is not DNS server, nothing to upgrade')
logger.debug('This server is not DNS server, nothing to upgrade')
sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True)
return False, []
# create container first, if doesn't exist
ensure_dnsserver_container_exists(ldap, self.api, logger=self.log)
ensure_dnsserver_container_exists(ldap, self.api)
try:
self.api.Command.dnsserver_add(self.api.env.host)
except errors.DuplicateEntry:
self.log.debug("DNS server configuration already exists "
logger.debug("DNS server configuration already exists "
"in LDAP database")
else:
self.log.debug("DNS server configuration has been sucessfully "
logger.debug("DNS server configuration has been sucessfully "
"created in LDAP database")
sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True)
return False, []
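The last hunk above also drops the logger=self.log keyword from the ensure_dnsserver_container_exists() call: helpers are expected to log through their own module logger rather than one threaded in by the caller. A hedged sketch of that convention with a made-up helper (not part of this commit):

import logging

logger = logging.getLogger(__name__)


def ensure_example_container_exists(ldap, api):
    # Hypothetical helper: it owns its logging, so callers no longer pass
    # a logger object along with the connection.
    logger.debug("Ensuring container exists under %s", api.env.basedn)
    # ... the actual LDAP work would go here ...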

View File

@@ -17,10 +17,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipaserver.install import replication
from ipalib import Registry
from ipalib import Updater
logger = logging.getLogger(__name__)
register = Registry()
EXCLUDE_TEMPLATE = '(objectclass=*) $ EXCLUDE %s'
@@ -36,7 +40,7 @@ class update_replica_attribute_lists(Updater):
def execute(self, **options):
# We need an LDAPClient connection to the backend
self.log.debug("Start replication agreement exclude list update task")
logger.debug("Start replication agreement exclude list update task")
conn = self.api.Backend.ldap2
repl = replication.ReplicationManager(self.api.env.realm,
@@ -46,11 +50,11 @@ class update_replica_attribute_lists(Updater):
# We need to update only IPA replica agreements, not winsync
ipa_replicas = repl.find_ipa_replication_agreements()
self.log.debug("Found %d agreement(s)", len(ipa_replicas))
logger.debug("Found %d agreement(s)", len(ipa_replicas))
for replica in ipa_replicas:
for desc in replica.get('description', []):
self.log.debug(desc)
logger.debug('%s', desc)
self._update_attr(repl, replica,
'nsDS5ReplicatedAttributeList',
@@ -61,7 +65,7 @@ class update_replica_attribute_lists(Updater):
self._update_attr(repl, replica,
'nsds5ReplicaStripAttrs', replication.STRIP_ATTRS)
self.log.debug("Done updating agreements")
logger.debug("Done updating agreements")
return False, [] # No restart, no updates
@@ -81,16 +85,16 @@ class update_replica_attribute_lists(Updater):
"""
attrlist = replica.single_value.get(attribute)
if attrlist is None:
self.log.debug("Adding %s", attribute)
logger.debug("Adding %s", attribute)
# Need to add it altogether
replica[attribute] = [template % " ".join(values)]
try:
repl.conn.update_entry(replica)
self.log.debug("Updated")
logger.debug("Updated")
except Exception as e:
self.log.error("Error caught updating replica: %s", str(e))
logger.error("Error caught updating replica: %s", str(e))
else:
attrlist_normalized = attrlist.lower().split()
@@ -98,7 +102,7 @@ class update_replica_attribute_lists(Updater):
if a.lower() not in attrlist_normalized]
if missing:
self.log.debug("%s needs updating (missing: %s)", attribute,
logger.debug("%s needs updating (missing: %s)", attribute,
', '.join(missing))
replica[attribute] = [
@@ -106,9 +110,9 @@ class update_replica_attribute_lists(Updater):
try:
repl.conn.update_entry(replica)
self.log.debug("Updated %s", attribute)
logger.debug("Updated %s", attribute)
except Exception as e:
self.log.error("Error caught updating %s: %s",
logger.error("Error caught updating %s: %s",
attribute, str(e))
else:
self.log.debug("%s: No update necessary" % attribute)
logger.debug("%s: No update necessary", attribute)

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import six
from ipalib import Registry, errors
@@ -24,6 +26,8 @@ from ipalib import Updater
from ipapython import ipautil
from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry()
if six.PY3:
@@ -56,7 +60,7 @@ class GenerateUpdateMixin(object):
If the input DN doesn't end with old_suffix, log an error and raise ValueError.
"""
if not dn.endswith(old_suffix):
self.error("unable to replace suffix '%s' with '%s' in '%s'",
logger.error("unable to replace suffix '%s' with '%s' in '%s'",
old_suffix, new_suffix, dn)
raise ValueError('no replacement made')
return DN(*dn[:-len(old_suffix)]) + new_suffix

View File

@@ -2,6 +2,8 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
import logging
from ipalib import errors
from ipalib import Registry
from ipalib import Updater
@@ -10,6 +12,8 @@ from ipaserver.install import cainstance
from ipaserver.install import ldapupdate
from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
register = Registry()
@@ -23,7 +27,7 @@ class update_ca_topology(Updater):
ca = cainstance.CAInstance(self.api.env.realm)
if not ca.is_configured():
self.log.debug("CA is not configured on this host")
logger.debug("CA is not configured on this host")
return False, []
ld = ldapupdate.LDAPUpdate(ldapi=True, sub_dict={

View File

@@ -2,6 +2,7 @@
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
import logging
import time
import ldap
@@ -10,6 +11,8 @@ from ipalib import errors
from ipalib import Updater
from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry()
@@ -35,14 +38,14 @@ class update_dna_shared_config(Updater):
if entry.single_value.get('nsslapd-pluginenabled') == 'off':
return False, ()
except errors.NotFound:
self.log.error("Could not find DNA plugin entry: %s" %
logger.error("Could not find DNA plugin entry: %s",
dna_config_base)
return False, ()
try:
entry = conn.get_entry(dna_config_base)
except errors.NotFound:
self.log.error("Could not find DNA config entry: %s" %
logger.error("Could not find DNA config entry: %s",
dna_config_base)
return False, ()
@@ -50,8 +53,8 @@ class update_dna_shared_config(Updater):
if sharedcfgdn is not None:
sharedcfgdn = DN(sharedcfgdn)
else:
self.log.error(
"Could not find DNA shared config DN in entry: %s" %
logger.error(
"Could not find DNA shared config DN in entry: %s",
dna_config_base)
return False, ()
@@ -80,25 +83,25 @@ class update_dna_shared_config(Updater):
)
break
except errors.NotFound:
self.log.debug(
logger.debug(
"Unable to find DNA shared config entry for "
"dnaHostname=%s (under %s) so far. Retry in 2 sec." %
(fqdn, sharedcfgdn)
"dnaHostname=%s (under %s) so far. Retry in 2 sec.",
fqdn, sharedcfgdn
)
time.sleep(2)
else:
self.log.error(
"Could not get dnaHostname entries in {} seconds".format(
max_wait * 2)
logger.error(
"Could not get dnaHostname entries in %s seconds",
max_wait * 2
)
return False, ()
# If there are several entries, all of them will be updated
# just log a debug msg. This is likely the result of #5510
if len(entries) != 1:
self.log.debug(
"%d entries dnaHostname=%s under %s. One expected" %
(len(entries), fqdn, sharedcfgdn)
logger.debug(
"%d entries dnaHostname=%s under %s. One expected",
len(entries), fqdn, sharedcfgdn
)
# time to set the bind method and the protocol in the
@@ -117,9 +120,9 @@ class update_dna_shared_config(Updater):
try:
conn.update_entry(entry)
except Exception as e:
self.log.error(
logger.error(
"Failed to set SASL/GSSAPI bind method/protocol "
"in entry {}: {}".format(entry, e)
"in entry %s: %s", entry, e
)
# no restart, no update
return False, ()

View File

@@ -83,6 +83,8 @@ No other keys are allowed in the template
The plugin also deletes permissions specified in OBSOLETE_PERMISSIONS.
"""
import logging
import six
from ipalib import api, errors
@@ -97,6 +99,8 @@ from ipaserver.plugins.permission import permission, permission_del
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
register = Registry()
OBSOLETE_PERMISSIONS = {
@@ -327,7 +331,7 @@ class update_managed_permissions(Updater):
for acistr in acistrs:
if ACI(acistr).isequal(anonymous_read_aci):
self.log.debug('Removing anonymous ACI: %s', acistr)
logger.debug('Removing anonymous ACI: %s', acistr)
acistrs.remove(acistr)
break
else:
@@ -356,18 +360,18 @@ class update_managed_permissions(Updater):
anonymous_read_aci = self.get_anonymous_read_aci(ldap)
if anonymous_read_aci:
self.log.debug('Anonymous read ACI: %s', anonymous_read_aci)
logger.debug('Anonymous read ACI: %s', anonymous_read_aci)
else:
self.log.debug('Anonymous ACI not found')
logger.debug('Anonymous ACI not found')
current_obj = () # initially distinct from any obj value, even None
for name, template, obj in self.get_templates():
if current_obj != obj:
if obj:
self.log.debug('Updating managed permissions for %s',
logger.debug('Updating managed permissions for %s',
obj.name)
else:
self.log.debug('Updating non-object managed permissions')
logger.debug('Updating non-object managed permissions')
current_obj = obj
self.update_permission(ldap,
@@ -380,15 +384,15 @@ class update_managed_permissions(Updater):
self.remove_anonymous_read_aci(ldap, anonymous_read_aci)
for obsolete_name in OBSOLETE_PERMISSIONS:
self.log.debug('Deleting obsolete permission %s', obsolete_name)
logger.debug('Deleting obsolete permission %s', obsolete_name)
try:
self.api.Command[permission_del](unicode(obsolete_name),
force=True,
version=u'2.101')
except errors.NotFound:
self.log.debug('Obsolete permission not found')
logger.debug('Obsolete permission not found')
else:
self.log.debug('Obsolete permission deleted: %s', obsolete_name)
logger.debug('Obsolete permission deleted: %s', obsolete_name)
return False, ()
@@ -429,7 +433,7 @@ class update_managed_permissions(Updater):
legacy_entry = ldap.get_entry(legacy_dn,
['ipapermissiontype', 'cn'])
except errors.NotFound:
self.log.debug("Legacy permission %s not found", legacy_name)
logger.debug("Legacy permission %s not found", legacy_name)
else:
if 'ipapermissiontype' not in legacy_entry:
if is_new:
@@ -440,47 +444,47 @@ class update_managed_permissions(Updater):
included, excluded = self.get_upgrade_attr_lists(
acistr, legacy_acistrs)
except IncompatibleACIModification:
self.log.error(
logger.error(
"Permission '%s' has been modified from its "
"default; not updating it to '%s'.",
legacy_name, name)
return
else:
self.log.debug("Merging attributes from legacy "
logger.debug("Merging attributes from legacy "
"permission '%s'", legacy_name)
self.log.debug("Included attrs: %s",
logger.debug("Included attrs: %s",
', '.join(sorted(included)))
self.log.debug("Excluded attrs: %s",
logger.debug("Excluded attrs: %s",
', '.join(sorted(excluded)))
entry['ipapermincludedattr'] = list(included)
entry['ipapermexcludedattr'] = list(excluded)
remove_legacy = True
else:
self.log.debug("Ignoring attributes in legacy "
logger.debug("Ignoring attributes in legacy "
"permission '%s' because '%s' exists",
legacy_name, name)
remove_legacy = True
else:
self.log.debug("Ignoring V2 permission named '%s'" %
logger.debug("Ignoring V2 permission named '%s'",
legacy_name)
update_aci = True
self.log.debug('Updating managed permission: %s', name)
logger.debug('Updating managed permission: %s', name)
if is_new:
ldap.add_entry(entry)
else:
try:
ldap.update_entry(entry)
except errors.EmptyModlist:
self.log.debug('No changes to permission: %s', name)
logger.debug('No changes to permission: %s', name)
update_aci = False
if update_aci:
self.log.debug('Updating ACI for managed permission: %s', name)
logger.debug('Updating ACI for managed permission: %s', name)
permission_plugin.update_aci(entry)
if remove_legacy:
self.log.debug("Removing legacy permission '%s'", legacy_name)
logger.debug("Removing legacy permission '%s'", legacy_name)
self.api.Command[permission_del](unicode(legacy_name))
for name in template.get('replaces_system', ()):
@@ -489,14 +493,14 @@ class update_managed_permissions(Updater):
entry = ldap.get_entry(permission_plugin.get_dn(name),
['ipapermissiontype'])
except errors.NotFound:
self.log.debug("Legacy permission '%s' not found", name)
logger.debug("Legacy permission '%s' not found", name)
else:
flags = entry.get('ipapermissiontype', [])
if list(flags) == ['SYSTEM']:
self.log.debug("Removing legacy permission '%s'", name)
logger.debug("Removing legacy permission '%s'", name)
self.api.Command[permission_del](name, force=True)
else:
self.log.debug("Ignoring V2 permission '%s'", name)
logger.debug("Ignoring V2 permission '%s'", name)
def get_upgrade_attr_lists(self, current_acistring, default_acistrings):
"""Compute included and excluded attributes for a new permission
@@ -541,7 +545,7 @@ class update_managed_permissions(Updater):
current_aci = ACI(current_acistring)
current_attrs = _pop_targetattr(current_aci)
self.log.debug("Current ACI for '%s': %s",
logger.debug("Current ACI for '%s': %s",
current_aci.name, current_acistring)
attrs_in_all_defaults = None
@@ -550,11 +554,11 @@ class update_managed_permissions(Updater):
for default_acistring in default_acistrings:
default_aci = ACI(default_acistring)
default_attrs = _pop_targetattr(default_aci)
self.log.debug("Default ACI for '%s': %s",
logger.debug("Default ACI for '%s': %s",
default_aci.name, default_acistring)
if current_aci != default_aci:
self.log.debug('ACIs not compatible')
logger.debug('ACIs not compatible')
continue
else:
all_incompatible = False
@@ -566,7 +570,7 @@ class update_managed_permissions(Updater):
attrs_in_any_defaults |= default_attrs
if all_incompatible:
self.log.debug('All old default ACIs are incompatible')
logger.debug('All old default ACIs are incompatible')
raise(IncompatibleACIModification())
included = current_attrs - attrs_in_any_defaults
@@ -659,7 +663,7 @@ class update_managed_permissions(Updater):
anonymous_read_aci.target['targetattr']['expression'])
read_blacklist &= attributes
if read_blacklist:
self.log.debug('Excluded attributes for %s: %s',
logger.debug('Excluded attributes for %s: %s',
name, ', '.join(read_blacklist))
entry['ipapermexcludedattr'] = list(read_blacklist)
@@ -700,7 +704,7 @@ class update_read_replication_agreements_permission(Updater):
try:
perm_entry = ldap.get_entry(old_perm_dn)
except errors.NotFound:
self.log.debug("Old permission not found")
logger.debug("Old permission not found")
return False, ()
try:
@@ -709,9 +713,9 @@ class update_read_replication_agreements_permission(Updater):
# we can happily upgrade
pass
else:
self.log.error("Permission '{}' cannot be upgraded. "
"Permission with target name '{}' already "
"exists".format(old_perm_dn, new_perm_dn))
logger.error("Permission '%s' cannot be upgraded. "
"Permission with target name '%s' already "
"exists", old_perm_dn, new_perm_dn)
return False, ()
# values are case insensitive
@@ -729,7 +733,7 @@ class update_read_replication_agreements_permission(Updater):
perm_entry['ipapermlocation'] = []
perm_entry['ipapermtargetfilter'] = []
self.log.debug("Removing MANAGED attributes from permission %s",
logger.debug("Removing MANAGED attributes from permission %s",
old_perm_dn)
try:
ldap.update_entry(perm_entry)
@@ -737,6 +741,6 @@ class update_read_replication_agreements_permission(Updater):
pass
# do modrdn on permission
self.log.debug("modrdn: %s -> %s", old_perm_dn, new_perm_dn)
logger.debug("modrdn: %s -> %s", old_perm_dn, new_perm_dn)
ldap.move_entry(old_perm_dn, new_perm_dn)
return False, ()

View File

@@ -2,6 +2,8 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
import logging
from ipalib.plugable import Registry
from ipalib import errors
from ipalib import Updater
@@ -10,6 +12,8 @@ from ipapython.dn import DN
from ipaserver.install import sysupgrade
from ipaserver.install.ldapupdate import LDAPUpdate
logger = logging.getLogger(__name__)
register = Registry()
@@ -32,7 +36,7 @@ class update_nis_configuration(Updater):
# maps, we do not want to restore them again
return
self.log.debug("Recovering from missing NIS maps bug")
logger.debug("Recovering from missing NIS maps bug")
suffix = "cn=NIS Server,cn=plugins,cn=config"
domain = self.api.env.domain
@@ -71,7 +75,7 @@ class update_nis_configuration(Updater):
ldap.get_entry(dn, attrs_list=['cn'])
except errors.NotFound:
# NIS is not configured on system, do not execute update
self.log.debug("Skipping NIS update, NIS Server is not configured")
logger.debug("Skipping NIS update, NIS Server is not configured")
# container does not exist, bug #5507 is not effective
sysupgrade.set_upgrade_state(
@@ -79,7 +83,7 @@ class update_nis_configuration(Updater):
else:
self.__recover_from_missing_maps(ldap)
self.log.debug("Executing NIS Server update")
logger.debug("Executing NIS Server update")
ld = LDAPUpdate(sub_dict={}, ldapi=True)
ld.update([paths.NIS_UPDATE_ULDIF])

View File

@@ -17,10 +17,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import Registry, errors
from ipalib import Updater
from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry()
@@ -38,18 +42,18 @@ class update_pacs(Updater):
entry = ldap.get_entry(dn, ['ipakrbauthzdata'])
pacs = entry.get('ipakrbauthzdata', [])
except errors.NotFound:
self.log.warning('Error retrieving: %s' % str(dn))
logger.warning('Error retrieving: %s', str(dn))
return False, []
nfs_pac_set = any(pac.startswith('nfs:') for pac in pacs)
if not nfs_pac_set:
self.log.debug('Adding nfs:NONE to default PAC types')
logger.debug('Adding nfs:NONE to default PAC types')
updated_pacs = pacs + [u'nfs:NONE']
entry['ipakrbauthzdata'] = updated_pacs
ldap.update_entry(entry)
else:
self.log.debug('PAC for nfs is already set, not adding nfs:NONE.')
logger.debug('PAC for nfs is already set, not adding nfs:NONE.')
return False, []

View File

@@ -2,6 +2,7 @@
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
import logging
import os
import tempfile
@@ -12,6 +13,8 @@ from ipaplatform.paths import paths
from ipapython.certdb import NSSDatabase
from ipaserver.install import cainstance
logger = logging.getLogger(__name__)
register = Registry()
@@ -35,11 +38,11 @@ class update_ra_cert_store(Updater):
elif os.path.exists(paths.RA_AGENT_PEM):
# even though the certificate file exists, we will overwrite it
# as it's probabably something wrong anyway
self.log.warning(
logger.warning(
"A certificate with the nickname 'ipaCert' exists in "
"the old '{}' NSS database as well as in the new "
"PEM file '{}'"
.format(paths.HTTPD_ALIAS_DIR, paths.RA_AGENT_PEM))
"the old '%s' NSS database as well as in the new "
"PEM file '%s'",
paths.HTTPD_ALIAS_DIR, paths.RA_AGENT_PEM)
_fd, p12file = tempfile.mkstemp(dir=certdb.secdir)
# no password is necessary as we will be saving it in clear anyway

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib.install import certstore
from ipaplatform.paths import paths
from ipaserver.install import certs
@@ -25,6 +27,8 @@ from ipalib import Updater
from ipapython import certdb
from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry()
@@ -66,7 +70,7 @@ class update_upload_cacrt(Updater):
try:
certstore.init_ca_entry(entry, cert, nickname, trust, eku)
except Exception as e:
self.log.warning("Failed to create entry for %s: %s",
logger.warning("Failed to create entry for %s: %s",
nickname, e)
continue
if nickname == ca_nickname:

View File

@@ -45,6 +45,8 @@ And then a nested response for each IPA command method sent in the request
"""
import logging
import six
from ipalib import api, errors
@@ -60,6 +62,8 @@ from ipapython.version import API_VERSION
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
register = Registry()
@register()
@@ -120,7 +124,7 @@ class batch(Command):
newkw.setdefault('version', options['version'])
result = api.Command[name](*a, **newkw)
self.info(
logger.info(
'%s: batch: %s(%s): SUCCESS',
getattr(context, 'principal', 'UNKNOWN'),
name,
@@ -130,13 +134,13 @@ class batch(Command):
except Exception as e:
if isinstance(e, errors.RequirementError) or \
isinstance(e, errors.CommandError):
self.info(
logger.info(
'%s: batch: %s',
context.principal, # pylint: disable=no-member
e.__class__.__name__
)
else:
self.info(
logger.info(
'%s: batch: %s(%s): %s',
context.principal, name, # pylint: disable=no-member
', '.join(api.Command[name]._repr_iter(**params)),

View File

@@ -22,6 +22,7 @@
import base64
import collections
import datetime
import logging
from operator import attrgetter
import os
@@ -148,6 +149,8 @@ http://www.ietf.org/rfc/rfc5280.txt
""")
logger = logging.getLogger(__name__)
USER, HOST, KRBTGT, SERVICE = range(4)
register = Registry()
@@ -921,7 +924,7 @@ class cert_request(Create, BaseCertMethod, VirtualCommand):
elif principal_type == USER:
api.Command['user_mod'](principal.username, **kwargs)
elif principal_type == KRBTGT:
self.log.error("Profiles used to store cert should't be "
logger.error("Profiles used to store cert should't be "
"used for krbtgt certificates")
if 'certificate_chain' in ca_obj:
@@ -1194,7 +1197,8 @@ class cert_show(Retrieve, CertMethod, VirtualCommand):
try:
self.check_access()
except errors.ACIError as acierr:
self.debug("Not granted by ACI to retrieve certificate, looking at principal")
logger.debug("Not granted by ACI to retrieve certificate, "
"looking at principal")
if not bind_principal_can_manage_cert(cert):
raise acierr # pylint: disable=E0702
@@ -1264,7 +1268,8 @@ class cert_revoke(PKQuery, CertMethod, VirtualCommand):
try:
self.check_access()
except errors.ACIError as acierr:
self.debug("Not granted by ACI to revoke certificate, looking at principal")
logger.debug("Not granted by ACI to revoke certificate, "
"looking at principal")
try:
cert = x509.load_certificate(resp['result']['certificate'])
if not bind_principal_can_manage_cert(cert):

View File

@@ -18,6 +18,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import logging
import dbus
import six
@@ -44,6 +46,8 @@ from ipaserver.plugins.service import validate_certificate
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
__doc__ = _("""
Certificate Identity Mapping
""") + _("""
@@ -403,14 +407,14 @@ class _sssd(object):
"""
Auxiliary class for SSSD infopipe DBus.
"""
def __init__(self, log):
def __init__(self):
"""
Initialize the Users object and interface.
:raise RemoteRetrieveError: if DBus error occurs
"""
try:
self.log = log
self.log = logger
self._bus = dbus.SystemBus()
self._users_obj = self._bus.get_object(
DBUS_SSSD_NAME, DBUS_SSSD_USERS_PATH)
@@ -535,7 +539,7 @@ class certmap_match(Search):
FreeIPA domain and trusted domains.
:raise RemoteRetrieveError: if DBus returns an exception
"""
sssd = _sssd(self.log)
sssd = _sssd()
cert = args[0]
users = sssd.list_users_by_cert(cert)

View File

@@ -20,6 +20,8 @@
from __future__ import absolute_import
import logging
import netaddr
import time
import re
@@ -311,6 +313,8 @@ server:
ipa dnsconfig-mod --forwarder=203.0.113.113
""")
logger = logging.getLogger(__name__)
register = Registry()
# supported resource record types
@@ -1542,7 +1546,7 @@ def __dns_record_options_iter():
_dns_record_options = tuple(__dns_record_options_iter())
def check_ns_rec_resolvable(zone, name, log):
def check_ns_rec_resolvable(zone, name):
assert isinstance(zone, DNSName)
assert isinstance(name, DNSName)
@@ -2772,8 +2776,7 @@ class dnszone_add(DNSZoneBase_add):
# verify if user specified server is resolvable
if not options['skip_nameserver_check']:
check_ns_rec_resolvable(keys[0], entry_attrs['idnssoamname'],
self.log)
check_ns_rec_resolvable(keys[0], entry_attrs['idnssoamname'])
# show warning about --name-server option
context.show_warning_nameserver_option = True
else:
@@ -2871,7 +2874,7 @@ class dnszone_mod(DNSZoneBase_mod):
nameserver = entry_attrs['idnssoamname']
if nameserver:
if not nameserver.is_empty() and not options['force']:
check_ns_rec_resolvable(keys[0], nameserver, self.log)
check_ns_rec_resolvable(keys[0], nameserver)
context.show_warning_nameserver_option = True
else:
# empty value, this option is required by ldap
@@ -3044,7 +3047,7 @@ class dnsrecord(LDAPObject):
if options.get('force', False) or nsrecords is None:
return
for nsrecord in nsrecords:
check_ns_rec_resolvable(keys[0], DNSName(nsrecord), self.log)
check_ns_rec_resolvable(keys[0], DNSName(nsrecord))
def _idnsname_pre_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
@@ -3350,7 +3353,7 @@ class dnsrecord(LDAPObject):
ldap_rrsets[rdtype] = ldap_rrset
except dns.exception.SyntaxError as e:
self.log.error('DNS syntax error: %s %s %s: %s', dns_name,
logger.error('DNS syntax error: %s %s %s: %s', dns_name,
dns.rdatatype.to_text(rdtype), value, e)
raise
@@ -3376,14 +3379,14 @@ class dnsrecord(LDAPObject):
warn_attempts = max_attempts // 2
period = 1 # second
attempt = 0
log_fn = self.log.debug
log_fn = logger.debug
log_fn('querying DNS server: expecting answer {%s}', ldap_rrset)
wait_template = 'waiting for DNS answer {%s}: got {%s} (attempt %s); '\
'waiting %s seconds before next try'
while attempt < max_attempts:
if attempt >= warn_attempts:
log_fn = self.log.warning
log_fn = logger.warning
attempt += 1
try:
dns_answer = resolver.query(dns_name, rdtype,
@@ -3453,13 +3456,13 @@ class dnsrecord(LDAPObject):
else:
e = errors.DNSDataMismatch(expected=ldap_rrset,
got="NXDOMAIN")
self.log.error(e)
logger.error('%s', e)
raise e
except dns.resolver.NoNameservers as e:
# Do not raise exception if we have got SERVFAILs.
# Maybe the user has created an invalid zone intentionally.
self.log.warning('waiting for DNS answer {%s}: got {%s}; '
logger.warning('waiting for DNS answer {%s}: got {%s}; '
'ignoring', ldap_rrset, type(e))
continue
@@ -3469,7 +3472,7 @@ class dnsrecord(LDAPObject):
if err_str:
err_desc += ": %s" % err_str
e = errors.DNSDataMismatch(expected=ldap_rrset, got=err_desc)
self.log.error(e)
logger.error('%s', e)
raise e
def wait_for_modified_entries(self, entries):
@@ -4211,7 +4214,7 @@ class dnsconfig_mod(LDAPUpdate):
# forwarders were changed
for forwarder in forwarders:
try:
validate_dnssec_global_forwarder(forwarder, log=self.log)
validate_dnssec_global_forwarder(forwarder)
except DNSSECSignatureMissingError as e:
messages.add_message(
options['version'],
@@ -4286,8 +4289,7 @@ class dnsforwardzone(DNSZoneBase):
for forwarder in forwarders:
try:
validate_dnssec_zone_forwarder_step1(forwarder, fwzone,
log=self.log)
validate_dnssec_zone_forwarder_step1(forwarder, fwzone)
except UnresolvableRecordError as e:
messages.add_message(
options['version'],
@@ -4320,7 +4322,7 @@ class dnsforwardzone(DNSZoneBase):
if not ipa_dns_masters:
# something very bad happened, DNS is installed, but no IPA DNS
# servers available
self.log.error("No IPA DNS server can be found, but integrated DNS "
logger.error("No IPA DNS server can be found, but integrated DNS "
"is installed")
return
@@ -4335,7 +4337,7 @@ class dnsforwardzone(DNSZoneBase):
break
if not ipa_dns_ip:
self.log.error("Cannot resolve %s hostname", ipa_dns_masters[0])
logger.error("Cannot resolve %s hostname", ipa_dns_masters[0])
return
# sleep a bit, adding new zone to BIND from LDAP may take a while
@@ -4344,8 +4346,7 @@ class dnsforwardzone(DNSZoneBase):
# Test if IPA is able to receive replies from forwarders
try:
validate_dnssec_zone_forwarder_step2(ipa_dns_ip, fwzone,
log=self.log)
validate_dnssec_zone_forwarder_step2(ipa_dns_ip, fwzone)
except DNSSECValidationError as e:
messages.add_message(
options['version'],

View File

@@ -241,6 +241,8 @@ digits and nothing else follows.
import datetime
import json
import logging
from lxml import etree
import time
import contextlib
@@ -262,6 +264,8 @@ if api.env.in_server:
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
# These are general status return values used when
# CMSServlet.outputError() is invoked.
CMS_SUCCESS = 0
@@ -1387,7 +1391,7 @@ class ra(rabase.rabase, RestClient):
if detail is not None:
err_msg = u'%s (%s)' % (err_msg, detail)
self.error('%s.%s(): %s', type(self).__name__, func_name, err_msg)
logger.error('%s.%s(): %s', type(self).__name__, func_name, err_msg)
raise errors.CertificateOperationError(error=err_msg)
def _request(self, url, port, **kw):
@@ -1434,7 +1438,7 @@ class ra(rabase.rabase, RestClient):
self.raise_certificate_operation_error('get_parse_result_xml',
detail=str(e))
result = parse_func(doc)
self.debug(
logger.debug(
"%s() xml_text:\n%r\nparse_result:\n%r",
parse_func.__name__, xml_text, result)
return result
@@ -1473,7 +1477,7 @@ class ra(rabase.rabase, RestClient):
"""
self.debug('%s.check_request_status()', type(self).__name__)
logger.debug('%s.check_request_status()', type(self).__name__)
# Call CMS
http_status, _http_headers, http_body = (
@@ -1554,7 +1558,7 @@ class ra(rabase.rabase, RestClient):
"""
self.debug('%s.get_certificate()', type(self).__name__)
logger.debug('%s.get_certificate()', type(self).__name__)
# Convert serial number to integral type from string to properly handle
# radix issues. Note: the int object constructor will properly handle large
@@ -1621,7 +1625,7 @@ class ra(rabase.rabase, RestClient):
``unicode``, decimal representation
"""
self.debug('%s.request_certificate()', type(self).__name__)
logger.debug('%s.request_certificate()', type(self).__name__)
# Call CMS
template = u'''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
@@ -1711,7 +1715,7 @@ class ra(rabase.rabase, RestClient):
+---------------+---------------+---------------+
"""
self.debug('%s.revoke_certificate()', type(self).__name__)
logger.debug('%s.revoke_certificate()', type(self).__name__)
if type(revocation_reason) is not int:
raise TypeError(TYPE_ERROR % ('revocation_reason', int, revocation_reason, type(revocation_reason)))
@@ -1773,7 +1777,7 @@ class ra(rabase.rabase, RestClient):
+---------------+---------------+---------------+
"""
self.debug('%s.take_certificate_off_hold()', type(self).__name__)
logger.debug('%s.take_certificate_off_hold()', type(self).__name__)
# Convert serial number to integral type from string to properly handle
# radix issues. Note: the int object constructor will properly handle large
@@ -1825,7 +1829,7 @@ class ra(rabase.rabase, RestClient):
ts = time.strptime(value, '%Y-%m-%d')
return int(time.mktime(ts) * 1000)
self.debug('%s.find()', type(self).__name__)
logger.debug('%s.find()', type(self).__name__)
# Create the root element
page = etree.Element('CertSearchRequest')
@@ -1901,7 +1905,7 @@ class ra(rabase.rabase, RestClient):
e.text = str(booloptions[opt]).lower()
payload = etree.tostring(doc, pretty_print=False, xml_declaration=True, encoding='UTF-8')
self.debug('%s.find(): request: %s', type(self).__name__, payload)
logger.debug('%s.find(): request: %s', type(self).__name__, payload)
url = 'http://%s/ca/rest/certs/search?size=%d' % (
ipautil.format_netloc(self.ca_host, 80),
@@ -1915,7 +1919,7 @@ class ra(rabase.rabase, RestClient):
try:
response = opener.open(req)
except urllib.error.HTTPError as e:
self.debug('HTTP Response code: %d' % e.getcode())
logger.debug('HTTP Response code: %d', e.getcode())
if e.getcode() == 501:
self.raise_certificate_operation_error('find',
detail=_('find not supported on CAs upgraded from 9 to 10'))
@@ -1926,7 +1930,7 @@ class ra(rabase.rabase, RestClient):
detail=e.reason)
data = response.readlines()
self.debug('%s.find(): response: %s', type(self).__name__, data)
logger.debug('%s.find(): response: %s', type(self).__name__, data)
parser = etree.XMLParser()
try:
doc = etree.fromstring(data[0], parser)
@@ -2128,7 +2132,7 @@ class ra_lightweight_ca(RestClient):
try:
return json.loads(ipautil.decode_json(resp_body))
except Exception as e:
self.log.debug(e, exc_info=True)
logger.debug('%s', e, exc_info=True)
raise errors.RemoteRetrieveError(
reason=_("Response from CA was not valid JSON"))
@@ -2138,7 +2142,7 @@ class ra_lightweight_ca(RestClient):
try:
return json.loads(ipautil.decode_json(resp_body))
except Exception as e:
self.log.debug(e, exc_info=True)
logger.debug('%s', e, exc_info=True)
raise errors.RemoteRetrieveError(
reason=_("Response from CA was not valid JSON"))

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import api, errors, output, util
from ipalib import Command, Str, Flag, Int
from ipalib import _
@@ -213,6 +215,8 @@ EXAMPLES:
Not matched rules: can_login
""")
logger = logging.getLogger(__name__)
register = Registry()
def _convert_to_ipa_rule(rule):
@@ -479,10 +483,11 @@ class hbactest(Command):
code, rule_name = e.args
if code == pyhbac.HBAC_EVAL_ERROR:
error_rules.append(rule_name)
self.log.info('Native IPA HBAC rule "%s" parsing error: %s' % \
(rule_name, pyhbac.hbac_result_string(code)))
logger.info('Native IPA HBAC rule "%s" parsing error: '
'%s',
rule_name, pyhbac.hbac_result_string(code))
except (TypeError, IOError) as info:
self.log.error('Native IPA HBAC module error: %s' % info)
logger.error('Native IPA HBAC module error: %s', info)
access_granted = len(matched_rules) > 0
else:

View File

@@ -20,6 +20,8 @@
from __future__ import absolute_import
import logging
import dns.resolver
import six
@@ -133,6 +135,8 @@ EXAMPLES:
ipa host-allow-create-keytab test2 --users=tuser1
""")
logger = logging.getLogger(__name__)
register = Registry()
@@ -1073,7 +1077,7 @@ class host_find(LDAPSearch):
reason=e,
)
)
self.log.error("Invalid certificate: {err}".format(err=e))
logger.error("Invalid certificate: %s", e)
del(entry_attrs['usercertificate'])
set_kerberos_attrs(entry_attrs, options)

View File

@@ -21,6 +21,8 @@
Joining an IPA domain
"""
import logging
import six
from ipalib import Registry, api
@@ -32,6 +34,8 @@ from ipaserver.install import installutils
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
register = Registry()
@@ -100,7 +104,7 @@ class join(Command):
dn = attrs_list['dn']
# No error raised so far means that host entry exists
self.log.info('Host entry for %s already exists, '
logger.info('Host entry for %s already exists, '
'joining may fail on the client side '
'if not forced', hostname)
@@ -109,7 +113,7 @@ class join(Command):
if 'krbprincipalname' not in attrs_list:
service = "host/%s@%s" % (hostname, api.env.realm)
api.Command['host_mod'](hostname, krbprincipalname=service)
self.log.info('No principal set, setting to %s', service)
logger.info('No principal set, setting to %s', service)
# It exists, can we write the password attributes?
allowed = ldap.can_write(dn, 'krblastpwdchange')

View File

@@ -27,6 +27,7 @@ Backend plugin for LDAP.
# binding encodes them into the appropriate representation. This applies to
# everything except the CrudBackend methods, where dn is part of the entry dict.
import logging
import os
import ldap as _ldap
@@ -56,6 +57,8 @@ from ipalib import Registry, errors, _
from ipalib.crud import CrudBackend
from ipalib.request import context
logger = logging.getLogger(__name__)
register = Registry()
_missing = object()
@@ -402,7 +405,7 @@ class ldap2(CrudBackend, LDAPClient):
assert isinstance(dn, DN)
assert isinstance(group_dn, DN)
self.log.debug(
logger.debug(
"add_entry_to_group: dn=%s group_dn=%s member_attr=%s",
dn, group_dn, member_attr)
@@ -432,7 +435,7 @@ class ldap2(CrudBackend, LDAPClient):
assert isinstance(dn, DN)
assert isinstance(group_dn, DN)
self.log.debug(
logger.debug(
"remove_entry_from_group: dn=%s group_dn=%s member_attr=%s",
dn, group_dn, member_attr)

View File

@@ -17,6 +17,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
from ldap import MOD_ADD
from ldap import SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE
@@ -132,6 +133,8 @@ for each user added plus a summary when the default user group is
updated.
""")
logger = logging.getLogger(__name__)
register = Registry()
# USER MIGRATION CALLBACKS AND VARS
@@ -758,11 +761,10 @@ migration process might be incomplete\n''')
truncated = False
entries = []
if truncated:
self.log.error(
'%s: %s' % (
logger.error(
'%s: %s',
ldap_obj.name, self.truncated_err_msg
)
)
blacklists = {}
for blacklist in ('oc_blacklist', 'attr_blacklist'):

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import six
from ipalib import api, errors, krb_utils
@@ -54,6 +56,8 @@ EXAMPLES:
ipa passwd tuser1
""")
logger = logging.getLogger(__name__)
register = Registry()
# We only need to prompt for the current password when changing a password
@@ -134,7 +138,8 @@ class passwd(Command):
if principal == getattr(context, 'principal') and \
current_password == MAGIC_VALUE:
# No cheating
self.log.warning('User attempted to change password using magic value')
logger.warning('User attempted to change password using magic '
'value')
raise errors.ACIError(info=_('Invalid credentials'))
if current_password == MAGIC_VALUE:

View File

@@ -17,6 +17,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import traceback
@@ -103,6 +104,8 @@ EXAMPLES:
ipa permission-add --attrs=member --permissions=write --type=group "Manage Group Members"
""")
logger = logging.getLogger(__name__)
register = Registry()
_DEPRECATED_OPTION_ALIASES = {
@@ -610,7 +613,7 @@ class permission(baseldap.LDAPObject):
location = permission_entry.single_value.get('ipapermlocation',
self.api.env.basedn)
self.log.debug('Adding ACI %r to %s' % (acistring, location))
logger.debug('Adding ACI %r to %s', acistring, location)
try:
entry = ldap.get_entry(location, ['aci'])
except errors.NotFound:
@@ -654,15 +657,15 @@ class permission(baseldap.LDAPObject):
acidn = acientry.dn # pylint: disable=E1103
if acistring is not None:
self.log.debug('Removing ACI %r from %s' % (acistring, acidn))
logger.debug('Removing ACI %r from %s', acistring, acidn)
acientry['aci'].remove(acistring)
if new_acistring:
self.log.debug('Adding ACI %r to %s' % (new_acistring, acidn))
logger.debug('Adding ACI %r to %s', new_acistring, acidn)
acientry.setdefault('aci', []).append(new_acistring)
try:
ldap.update_entry(acientry)
except errors.EmptyModlist:
self.log.debug('No changes to ACI')
logger.debug('No changes to ACI')
return acientry, acistring
def _get_aci_entry_and_string(self, permission_entry, name=None,
@@ -696,7 +699,7 @@ class permission(baseldap.LDAPObject):
try:
aci = ACI(acistring)
except SyntaxError as e:
self.log.warning('Unparseable ACI %s: %s (at %s)',
logger.warning('Unparseable ACI %s: %s (at %s)',
acistring, e, location)
continue
if aci.name == wanted_aciname:
@@ -1194,7 +1197,7 @@ class permission_mod(baseldap.LDAPUpdate):
try:
context.old_aci_info = self.obj.remove_aci(old_entry)
except errors.NotFound as e:
self.log.error('permission ACI not found: %s' % e)
logger.error('permission ACI not found: %s', e)
# To pass data to postcallback, we currently need to use the context
context.old_entry = old_entry
@@ -1212,8 +1215,8 @@ class permission_mod(baseldap.LDAPUpdate):
# Try to roll back the old ACI
entry, old_aci_string = old_aci_info
if old_aci_string:
self.log.warning('Reverting ACI on %s to %s' % (entry.dn,
old_aci_string))
logger.warning('Reverting ACI on %s to %s', entry.dn,
old_aci_string)
entry['aci'].append(old_aci_string)
self.Backend.ldap2.update_entry(entry)
@@ -1229,8 +1232,8 @@ class permission_mod(baseldap.LDAPUpdate):
# Don't revert attribute which doesn't exist in LDAP
entry.pop('attributelevelrights', None)
self.log.error('Error updating ACI: %s' % traceback.format_exc())
self.log.warning('Reverting entry')
logger.error('Error updating ACI: %s', traceback.format_exc())
logger.warning('Reverting entry')
old_entry.reset_modlist(entry)
ldap.update_entry(old_entry)
self._revert_aci()
@@ -1326,7 +1329,7 @@ class permission_find(baseldap.LDAPSearch):
root_entry = ldap.get_entry(DN(api.env.basedn), ['aci'])
except errors.NotFound:
legacy_entries = ()
self.log.debug('potential legacy entries: %s', len(legacy_entries))
logger.debug('potential legacy entries: %s', len(legacy_entries))
nonlegacy_names = {e.single_value['cn'] for e in entries}
for entry in legacy_entries:
if entry.single_value['cn'] in nonlegacy_names:

View File

@@ -18,6 +18,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import api
from ipalib import Int, Str, DNParam
from ipalib import errors
@@ -81,6 +83,8 @@ EXAMPLES:
ipa pwpolicy-mod --minclasses=2 localadmins
""")
logger = logging.getLogger(__name__)
register = Registry()
@register()
@@ -435,7 +439,7 @@ class pwpolicy_add(LDAPCreate):
def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN)
self.log.info('%r' % entry_attrs)
logger.info('%r', entry_attrs)
# attribute rights are not allowed for pwpolicy_add
self.obj.add_cospriority(entry_attrs, keys[-1], rights=False)
self.obj.convert_time_for_output(entry_attrs, **options)

View File

@@ -2,6 +2,8 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
import logging
import dbus
import dbus.mainloop.glib
import ldap
@@ -41,6 +43,8 @@ EXAMPLES:
ipa server-show ipa.example.com
""")
logger = logging.getLogger(__name__)
register = Registry()
@@ -575,9 +579,9 @@ class server_del(LDAPDelete):
mod = [(ldap.MOD_DELETE, 'memberPrincipal', member_principal)]
conn.conn.modify_s(str(dn), mod)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
self.log.debug(
"Replica (%s) memberPrincipal (%s) not found in %s" %
(master, member_principal, dn))
logger.debug(
"Replica (%s) memberPrincipal (%s) not found in %s",
master, member_principal, dn)
except Exception as e:
self.add_message(
messages.ServerRemovalWarning(
@@ -803,11 +807,11 @@ class server_del(LDAPDelete):
return
time.sleep(2)
if i == 2: # taking too long, something is wrong, report
self.log.info(
logger.info(
"Waiting for removal of replication agreements")
if i > 90:
self.log.info("Taking too long, skipping")
self.log.info("Following segments were not deleted:")
logger.info("Taking too long, skipping")
logger.info("Following segments were not deleted:")
self.add_message(messages.ServerRemovalWarning(
message=_("Following segments were not deleted:")))
for s in left:
@@ -833,14 +837,14 @@ class server_del(LDAPDelete):
# If the server was already deleted, we can expect that all
# removals had been done in previous run and dangling segments
# were not deleted.
self.log.info(
logger.info(
"Skipping replication agreement deletion check for "
"suffix '{0}'".format(suffix_name))
"suffix '%s'", suffix_name)
continue
self.log.info(
"Checking for deleted segments in suffix '{0}'".format(
suffix_name))
logger.info(
"Checking for deleted segments in suffix '%s",
suffix_name)
wait_for_segment_removal(
hostname,

View File

@@ -19,6 +19,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from cryptography.hazmat.primitives import hashes
import six
@@ -116,6 +118,8 @@ EXAMPLES:
""")
logger = logging.getLogger(__name__)
register = Registry()
output_params = (
@@ -791,7 +795,7 @@ class service_find(LDAPSearch):
reason=e
)
)
self.log.error("Invalid certificate: {err}".format(err=e))
logger.error("Invalid certificate: %s", e)
del(entry_attrs['usercertificate'])
set_kerberos_attrs(entry_attrs, options)
@@ -829,7 +833,7 @@ class service_show(LDAPRetrieve):
reason=e,
)
)
self.log.error("Invalid certificate: {err}".format(err=e))
logger.error("Invalid certificate: %s", e)
del(entry_attrs['usercertificate'])
set_kerberos_attrs(entry_attrs, options)

View File

@@ -2,10 +2,14 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
import logging
from ipalib import Command
from ipalib.request import context
from ipalib.plugable import Registry
logger = logging.getLogger(__name__)
register = Registry()
@@ -19,7 +23,7 @@ class session_logout(Command):
def execute(self, *args, **options):
ccache_name = getattr(context, 'ccache_name', None)
if ccache_name is None:
self.debug('session logout command: no ccache_name found')
logger.debug('session logout command: no ccache_name found')
else:
delattr(context, 'ccache_name')

View File

@@ -17,6 +17,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import posixpath
from copy import deepcopy
@@ -101,6 +102,8 @@ EXAMPLES:
""")
logger = logging.getLogger(__name__)
register = Registry()
@@ -449,7 +452,8 @@ class stageuser_find(baseuser_find):
# but then they have to create inetOrgPerson stage user
stagefilter = filter.replace(container_filter,
"(|%s(objectclass=inetOrgPerson))" % container_filter)
self.log.debug("stageuser_find: pre_callback new filter=%s " % (stagefilter))
logger.debug("stageuser_find: pre_callback new filter=%s ",
stagefilter)
return (stagefilter, base_dn, scope)
def post_callback(self, ldap, entries, truncated, *args, **options):
@@ -583,9 +587,10 @@ class stageuser_activate(LDAPQuery):
(isinstance(v, unicode) and v in (u'', None)):
try:
v.decode('utf-8')
self.log.debug("merge: %s:%r wiped" % (attr, v))
logger.debug("merge: %s:%r wiped", attr, v)
except Exception:
self.log.debug("merge %s: [no_print %s]" % (attr, v.__class__.__name__))
logger.debug("merge %s: [no_print %s]",
attr, v.__class__.__name__)
if isinstance(entry_to[attr], (list, tuple)):
# multi value attribute
if v not in entry_to[attr]:
@@ -599,9 +604,10 @@ class stageuser_activate(LDAPQuery):
else:
try:
v.decode('utf-8')
self.log.debug("Add: %s:%r" % (attr, v))
logger.debug("Add: %s:%r", attr, v)
except Exception:
self.log.debug("Add %s: [no_print %s]" % (attr, v.__class__.__name__))
logger.debug("Add %s: [no_print %s]",
attr, v.__class__.__name__)
if isinstance(entry_to[attr], (list, tuple)):
# multi value attribute
@@ -697,7 +703,7 @@ class stageuser_activate(LDAPQuery):
del result_entry['description']
for (k, v) in new_entry_attrs.items():
self.log.debug("new entry: k=%r and v=%r)" % (k, v))
logger.debug("new entry: k=%r and v=%r)", k, v)
self._build_new_entry(ldap, staging_dn, entry_attrs, new_entry_attrs)
@@ -710,10 +716,12 @@ class stageuser_activate(LDAPQuery):
self._exc_wrapper(args, options, ldap.delete_entry)(staging_dn)
except:
try:
self.log.error("Fail to delete the Staging user after activating it %s " % (staging_dn))
logger.error("Fail to delete the Staging user after "
"activating it %s ", staging_dn)
self._exc_wrapper(args, options, ldap.delete_entry)(active_dn)
except Exception:
self.log.error("Fail to cleanup activation. The user remains active %s" % (active_dn))
logger.error("Fail to cleanup activation. The user remains "
"active %s", active_dn)
raise
# add the user we just created into the default primary group

View File

@@ -18,6 +18,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import six
from ipalib.messages import (
@@ -155,6 +157,8 @@ particular type.
ipa trustconfig-mod --type ad --fallback-primary-group "Default SMB Group"
""")
logger = logging.getLogger(__name__)
register = Registry()
# Trust type is a combination of ipanttrusttype and ipanttrustattributes
@@ -437,7 +441,8 @@ def add_range(myapi, trustinstance, range_name, dom_sid, *keys, **options):
# Return the values that were generated inside this function
return range_type, range_size, base_id
def fetch_trusted_domains_over_dbus(myapi, log, forest_name):
def fetch_trusted_domains_over_dbus(myapi, forest_name):
if not _bindings_installed:
return
# Calling oddjobd-activated service via DBus has some quirks:
@@ -454,12 +459,13 @@ def fetch_trusted_domains_over_dbus(myapi, log, forest_name):
fetch_domains_method = intf.get_dbus_method('fetch_domains', dbus_interface=DBUS_IFACE_TRUST)
(_ret, _stdout, _stderr) = fetch_domains_method(forest_name)
except dbus.DBusException as e:
log.error('Failed to call %(iface)s.fetch_domains helper.'
'DBus exception is %(exc)s.' % dict(iface=DBUS_IFACE_TRUST, exc=str(e)))
logger.error('Failed to call %s.fetch_domains helper.'
'DBus exception is %s.', DBUS_IFACE_TRUST, str(e))
if _ret != 0:
log.error('Helper was called for forest %(forest)s, return code is %(ret)d' % dict(forest=forest_name, ret=_ret))
log.error('Standard output from the helper:\n%s---\n' % (_stdout))
log.error('Error output from the helper:\n%s--\n' % (_stderr))
logger.error('Helper was called for forest %s, return code is %d',
forest_name, _ret)
logger.error('Standard output from the helper:\n%s---\n', _stdout)
logger.error('Error output from the helper:\n%s--\n', _stderr)
raise errors.ServerCommandError(server=myapi.env.host,
error=_('Fetching domains from trusted forest failed. '
'See details in the error_log'))
@@ -784,7 +790,7 @@ sides.
# object credentials to authenticate to AD with Kerberos,
# run DCE RPC calls to do discovery and will call
# add_new_domains_from_trust() on its own.
fetch_trusted_domains_over_dbus(self.api, self.log, result['value'])
fetch_trusted_domains_over_dbus(self.api, result['value'])
# Format the output into human-readable values unless `--raw` is given
self._format_trust_attrs(result, **options)
@@ -1743,7 +1749,7 @@ class trust_fetch_domains(LDAPRetrieve):
# With privilege separation we also cannot authenticate as
# HTTP/ principal because we have no access to its key material.
# Thus, we'll use DBus call out to oddjobd helper in all cases
fetch_trusted_domains_over_dbus(self.api, self.log, keys[0])
fetch_trusted_domains_over_dbus(self.api, keys[0])
result['summary'] = unicode(_('List of trust domains successfully '
'refreshed. Use trustdomain-find '
'command to list them.'))

View File

@@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import time
from time import gmtime, strftime
import posixpath
@@ -113,6 +114,8 @@ EXAMPLES:
ipa user-del tuser1
""")
logger = logging.getLogger(__name__)
register = Registry()
@@ -640,7 +643,7 @@ class user_del(baseuser_del):
dn = self.obj.get_either_dn(pkey, **options)
delete_dn = DN(dn[0], delete_container)
ldap = self.obj.backend
self.log.debug("preserve move %s -> %s" % (dn, delete_dn))
logger.debug("preserve move %s -> %s", dn, delete_dn)
if dn.endswith(delete_container):
raise errors.ExecutionError(
@@ -1108,7 +1111,7 @@ class user_status(LDAPQuery):
)
except errors.NotFound:
# If this happens we have some pretty serious problems
self.error('No IPA masters found!')
logger.error('No IPA masters found!')
entries = []
count = 0
@@ -1121,7 +1124,8 @@ class user_status(LDAPQuery):
other_ldap = LDAPClient(ldap_uri='ldap://%s' % host)
other_ldap.gssapi_bind()
except Exception as e:
self.error("user_status: Connecting to %s failed with %s" % (host, str(e)))
logger.error("user_status: Connecting to %s failed with "
"%s", host, str(e))
newresult = {'dn': dn}
newresult['server'] = _("%(host)s failed: %(error)s") % dict(host=host, error=str(e))
entries.append(newresult)
@@ -1141,7 +1145,8 @@ class user_status(LDAPQuery):
newtime = time.strptime(newresult[attr][0], '%Y%m%d%H%M%SZ')
newresult[attr][0] = unicode(time.strftime('%Y-%m-%dT%H:%M:%SZ', newtime))
except Exception as e:
self.debug("time conversion failed with %s" % str(e))
logger.debug("time conversion failed with %s",
str(e))
newresult['server'] = host
if options.get('raw', False):
time_format = '%Y%m%d%H%M%SZ'
@@ -1157,7 +1162,8 @@ class user_status(LDAPQuery):
except errors.NotFound:
self.api.Object.user.handle_not_found(*keys)
except Exception as e:
self.error("user_status: Retrieving status for %s failed with %s" % (dn, str(e)))
logger.error("user_status: Retrieving status for %s failed "
"with %s", dn, str(e))
newresult = {'dn': dn}
newresult['server'] = _("%(host)s failed") % dict(host=host)
entries.append(newresult)

View File

@@ -20,11 +20,17 @@
"""
Base classes for non-LDAP backend plugins.
"""
import logging
from ipalib import Command
from ipalib import errors
from ipapython.dn import DN
from ipalib.text import _
logger = logging.getLogger(__name__)
class VirtualCommand(Command):
"""
A command that doesn't use the LDAP backend but wants to use the
@@ -54,7 +60,7 @@ class VirtualCommand(Command):
operation = self.operation
ldap = self.api.Backend.ldap2
self.log.debug("IPA: virtual verify %s" % operation)
logger.debug("IPA: virtual verify %s", operation)
operationdn = DN(('cn', operation), self.api.env.container_virtual, self.api.env.basedn)

View File

@@ -23,6 +23,7 @@ RPC server.
Also see the `ipalib.rpc` module.
"""
import logging
from xml.sax.saxutils import escape
import os
import traceback
@@ -67,6 +68,8 @@ from requests.auth import AuthBase
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
HTTP_STATUS_SUCCESS = '200 Success'
HTTP_STATUS_SERVER_ERROR = '500 Internal Server Error'
@@ -138,7 +141,7 @@ class HTTP_Status(plugable.Plugin):
status = '404 Not Found'
response_headers = [('Content-Type', 'text/html; charset=utf-8')]
self.info('%s: URL="%s", %s', status, url, message)
logger.info('%s: URL="%s", %s', status, url, message)
start_response(status, response_headers)
output = _not_found_template % dict(url=escape(url))
return [output.encode('utf-8')]
@@ -150,7 +153,7 @@ class HTTP_Status(plugable.Plugin):
status = '400 Bad Request'
response_headers = [('Content-Type', 'text/html; charset=utf-8')]
self.info('%s: %s', status, message)
logger.info('%s: %s', status, message)
start_response(status, response_headers)
output = _bad_request_template % dict(message=escape(message))
@@ -163,7 +166,7 @@ class HTTP_Status(plugable.Plugin):
status = HTTP_STATUS_SERVER_ERROR
response_headers = [('Content-Type', 'text/html; charset=utf-8')]
self.error('%s: %s', status, message)
logger.error('%s: %s', status, message)
start_response(status, response_headers)
output = _internal_error_template % dict(message=escape(message))
@@ -178,7 +181,7 @@ class HTTP_Status(plugable.Plugin):
if reason:
response_headers.append(('X-IPA-Rejection-Reason', reason))
self.info('%s: %s', status, message)
logger.info('%s: %s', status, message)
start_response(status, response_headers)
output = _unauthorized_template % dict(message=escape(message))
@@ -257,7 +260,7 @@ class wsgi_dispatch(Executioner, HTTP_Status):
return key in self.__apps
def __call__(self, environ, start_response):
self.debug('WSGI wsgi_dispatch.__call__:')
logger.debug('WSGI wsgi_dispatch.__call__:')
try:
return self.route(environ, start_response)
finally:
@@ -288,7 +291,7 @@ class wsgi_dispatch(Executioner, HTTP_Status):
raise Exception('%s.mount(): cannot replace %r with %r at %r' % (
self.name, self.__apps[key], app, key)
)
self.debug('Mounting %r at %r', app, key)
logger.debug('Mounting %r at %r', app, key)
self.__apps[key] = app
@@ -367,10 +370,11 @@ class WSGIExecutioner(Executioner):
result = command(*args, **options)
except PublicError as e:
if self.api.env.debug:
self.debug('WSGI wsgi_execute PublicError: %s', traceback.format_exc())
logger.debug('WSGI wsgi_execute PublicError: %s',
traceback.format_exc())
error = e
except Exception as e:
self.exception(
logger.exception(
'non-public: %s: %s', e.__class__.__name__, str(e)
)
error = InternalError()
@@ -382,8 +386,9 @@ class WSGIExecutioner(Executioner):
try:
params = command.args_options_2_params(*args, **options)
except Exception as e:
self.info(
'exception %s caught when converting options: %s', e.__class__.__name__, str(e)
logger.info(
'exception %s caught when converting options: %s',
e.__class__.__name__, str(e)
)
# get at least some context of what is going on
params = options
@@ -392,14 +397,14 @@ class WSGIExecutioner(Executioner):
result_string = type(error).__name__
else:
result_string = 'SUCCESS'
self.info('[%s] %s: %s(%s): %s',
logger.info('[%s] %s: %s(%s): %s',
type(self).__name__,
principal,
name,
', '.join(command._repr_iter(**params)),
result_string)
else:
self.info('[%s] %s: %s: %s',
logger.info('[%s] %s: %s: %s',
type(self).__name__,
principal,
name,
@@ -418,7 +423,7 @@ class WSGIExecutioner(Executioner):
WSGI application for execution.
"""
self.debug('WSGI WSGIExecutioner.__call__:')
logger.debug('WSGI WSGIExecutioner.__call__:')
try:
status = HTTP_STATUS_SUCCESS
response = self.wsgi_execute(environ)
@@ -428,7 +433,7 @@ class WSGIExecutioner(Executioner):
headers = [('Content-Type',
self.content_type + '; charset=utf-8')]
except Exception:
self.exception('WSGI %s.__call__():', self.name)
logger.exception('WSGI %s.__call__():', self.name)
status = HTTP_STATUS_SERVER_ERROR
response = status.encode('utf-8')
headers = [('Content-Type', 'text/plain; charset=utf-8')]
@@ -463,7 +468,7 @@ class jsonserver(WSGIExecutioner, HTTP_Status):
'''
'''
self.debug('WSGI jsonserver.__call__:')
logger.debug('WSGI jsonserver.__call__:')
response = super(jsonserver, self).__call__(environ, start_response)
return response
@@ -598,7 +603,7 @@ class KerberosSession(HTTP_Status):
headers = []
response = b''
self.debug('%s need login', status)
logger.debug('%s need login', status)
start_response(status, headers)
return [response]
@@ -607,13 +612,13 @@ class KerberosSession(HTTP_Status):
# If we have a ccache ...
ccache_name = environ.get('KRB5CCNAME')
if ccache_name is None:
self.debug('no ccache, need login')
logger.debug('no ccache, need login')
return
# ... make sure we have a name ...
principal = environ.get('GSS_NAME')
if principal is None:
self.debug('no Principal Name, need login')
logger.debug('no Principal Name, need login')
return
# ... and use it to resolve the ccache name (Issue: 6972 )
@@ -623,7 +628,8 @@ class KerberosSession(HTTP_Status):
creds = get_credentials_if_valid(name=gss_name,
ccache_name=ccache_name)
if not creds:
self.debug('ccache expired or invalid, deleting session, need login')
logger.debug(
'ccache expired or invalid, deleting session, need login')
return
return ccache_name
@@ -662,7 +668,7 @@ class KerberosWSGIExecutioner(WSGIExecutioner, KerberosSession):
super(KerberosWSGIExecutioner, self)._on_finalize()
def __call__(self, environ, start_response):
self.debug('KerberosWSGIExecutioner.__call__:')
logger.debug('KerberosWSGIExecutioner.__call__:')
user_ccache=environ.get('KRB5CCNAME')
object.__setattr__(
@@ -674,7 +680,7 @@ class KerberosWSGIExecutioner(WSGIExecutioner, KerberosSession):
status = HTTP_STATUS_SERVER_ERROR
self.log.error(
logger.error(
'%s: %s', status,
'KerberosWSGIExecutioner.__call__: '
'KRB5CCNAME not defined in HTTP request environment')
@@ -767,11 +773,13 @@ class xmlserver(KerberosWSGIExecutioner):
def marshal(self, result, error, _id=None,
version=VERSION_WITHOUT_CAPABILITIES):
if error:
self.debug('response: %s: %s', error.__class__.__name__, str(error))
logger.debug('response: %s: %s',
error.__class__.__name__, str(error))
response = Fault(error.errno, error.strerror)
else:
if isinstance(result, dict):
self.debug('response: entries returned %d', result.get('count', 1))
logger.debug('response: entries returned %d',
result.get('count', 1))
response = (result,)
dump = xml_dumps(response, version, methodresponse=True)
return dump.encode('utf-8')
@@ -794,7 +802,7 @@ class jsonserver_session(jsonserver, KerberosSession):
'''
'''
self.debug('WSGI jsonserver_session.__call__:')
logger.debug('WSGI jsonserver_session.__call__:')
# Redirect to login if no Kerberos credentials
ccache_name = self.get_environ_creds(environ)
@@ -834,7 +842,7 @@ class KerberosLogin(Backend, KerberosSession):
self.api.Backend.wsgi_dispatch.mount(self, self.key)
def __call__(self, environ, start_response):
self.debug('WSGI KerberosLogin.__call__:')
logger.debug('WSGI KerberosLogin.__call__:')
# Redirect to login if no Kerberos credentials
user_ccache_name = self.get_environ_creds(environ)
@@ -852,7 +860,7 @@ class login_x509(KerberosLogin):
key = '/session/login_x509'
def __call__(self, environ, start_response):
self.debug('WSGI login_x509.__call__:')
logger.debug('WSGI login_x509.__call__:')
if 'KRB5CCNAME' not in environ:
return self.unauthorized(
@@ -872,7 +880,7 @@ class login_password(Backend, KerberosSession):
self.api.Backend.wsgi_dispatch.mount(self, self.key)
def __call__(self, environ, start_response):
self.debug('WSGI login_password.__call__:')
logger.debug('WSGI login_password.__call__:')
# Get the user and password parameters from the request
content_type = environ.get('CONTENT_TYPE', '').lower()
@@ -958,7 +966,7 @@ class login_password(Backend, KerberosSession):
armor_path = os.path.join(paths.IPA_CCACHES,
"armor_{}".format(os.getpid()))
self.debug('Obtaining armor in ccache %s', armor_path)
logger.debug('Obtaining armor in ccache %s', armor_path)
try:
kinit_armor(
@@ -966,7 +974,7 @@ class login_password(Backend, KerberosSession):
pkinit_anchors=[paths.KDC_CERT, paths.KDC_CA_BUNDLE_PEM],
)
except RuntimeError as e:
self.error("Failed to obtain armor cache")
logger.error("Failed to obtain armor cache")
# We try to continue w/o armor, 2FA will be impacted
armor_path = None
@@ -980,7 +988,7 @@ class login_password(Backend, KerberosSession):
lifetime=self.api.env.kinit_lifetime)
if armor_path:
self.debug('Cleanup the armor ccache')
logger.debug('Cleanup the armor ccache')
ipautil.run([paths.KDESTROY, '-A', '-c', armor_path],
env={'KRB5CCNAME': armor_path}, raiseonerr=False)
except RuntimeError as e:
@@ -1009,7 +1017,7 @@ class change_password(Backend, HTTP_Status):
self.api.Backend.wsgi_dispatch.mount(self, self.key)
def __call__(self, environ, start_response):
self.info('WSGI change_password.__call__:')
logger.info('WSGI change_password.__call__:')
# Get the user and password parameters from the request
content_type = environ.get('CONTENT_TYPE', '').lower()
@@ -1040,7 +1048,8 @@ class change_password(Backend, HTTP_Status):
return self.bad_request(environ, start_response, "no %s specified" % field)
# start building the response
self.info("WSGI change_password: start password change of user '%s'", data['user'])
logger.info("WSGI change_password: start password change of user '%s'",
data['user'])
status = HTTP_STATUS_SUCCESS
response_headers = [('Content-Type', 'text/html; charset=utf-8')]
title = 'Password change rejected'
@@ -1061,7 +1070,8 @@ class change_password(Backend, HTTP_Status):
message = 'The old password or username is not correct.'
except Exception as e:
message = "Could not connect to LDAP server."
self.error("change_password: cannot authenticate '%s' to LDAP server: %s",
logger.error("change_password: cannot authenticate '%s' to LDAP "
"server: %s",
data['user'], str(e))
else:
try:
@@ -1072,7 +1082,8 @@ class change_password(Backend, HTTP_Status):
message = "Password change was rejected: %s" % escape(str(e))
except Exception as e:
message = "Could not change the password"
self.error("change_password: cannot change password of '%s': %s",
logger.error("change_password: cannot change password of "
"'%s': %s",
data['user'], str(e))
else:
result = 'ok'
@@ -1082,7 +1093,7 @@ class change_password(Backend, HTTP_Status):
if conn.isconnected():
conn.disconnect()
self.info('%s: %s', status, message)
logger.info('%s: %s', status, message)
response_headers.append(('X-IPA-Pwchange-Result', result))
if policy_error:
@@ -1178,7 +1189,8 @@ class sync_token(Backend, HTTP_Status):
except Exception as e:
result = 'error'
message = "Could not connect to LDAP server."
self.error("token_sync: cannot authenticate '%s' to LDAP server: %s",
logger.error("token_sync: cannot authenticate '%s' to LDAP "
"server: %s",
data['user'], str(e))
finally:
if conn.isconnected():
@@ -1209,7 +1221,7 @@ class xmlserver_session(xmlserver, KerberosSession):
headers = []
response = b''
self.debug('xmlserver_session: %s need login', status)
logger.debug('xmlserver_session: %s need login', status)
start_response(status, headers)
return [response]
@@ -1218,19 +1230,20 @@ class xmlserver_session(xmlserver, KerberosSession):
'''
'''
self.debug('WSGI xmlserver_session.__call__:')
logger.debug('WSGI xmlserver_session.__call__:')
ccache_name = environ.get('KRB5CCNAME')
# Redirect to /ipa/xml if no Kerberos credentials
if ccache_name is None:
self.debug('xmlserver_session.__call_: no ccache, need TGT')
logger.debug('xmlserver_session.__call_: no ccache, need TGT')
return self.need_login(start_response)
# Redirect to /ipa/xml if Kerberos credentials are expired
creds = get_credentials_if_valid(ccache_name=ccache_name)
if not creds:
self.debug('xmlserver_session.__call_: ccache expired, deleting session, need login')
logger.debug('xmlserver_session.__call_: ccache expired, deleting '
'session, need login')
# The request is finished with the ccache, destroy it.
return self.need_login(start_response)

View File

@@ -242,7 +242,7 @@ def mh(request, class_integration_logs):
print(mh.config)
for host in mh.config.get_all_hosts():
host.add_log_collector(collect_log)
cls.log.info('Preparing host %s', host.hostname)
log.info('Preparing host %s', host.hostname)
tasks.prepare_host(host)
setup_class(cls, mh)

View File

@@ -84,6 +84,3 @@ class IntegrationTest(object):
tasks.uninstall_master(replica)
for client in cls.clients:
tasks.uninstall_client(client)
IntegrationTest.log = log_mgr.get_logger(IntegrationTest())

View File

@@ -158,7 +158,7 @@ class TestBackupAndRestore(IntegrationTest):
with restore_checker(self.master):
backup_path = backup(self.master)
self.log.info('Backup path for %s is %s', self.master, backup_path)
log.info('Backup path for %s is %s', self.master, backup_path)
self.master.run_command(['ipa-server-install',
'--uninstall',
@@ -181,7 +181,7 @@ class TestBackupAndRestore(IntegrationTest):
with restore_checker(self.master):
backup_path = backup(self.master)
self.log.info('Backup path for %s is %s', self.master, backup_path)
log.info('Backup path for %s is %s', self.master, backup_path)
self.master.run_command(['ipa-server-install',
'--uninstall',
@@ -295,8 +295,10 @@ class BaseBackupAndRestoreWithDNSSEC(IntegrationTest):
'--dnssec', 'true',
])
assert wait_until_record_is_signed(self.master.ip,
self.example_test_zone, self.log), "Zone is not signed"
assert (
wait_until_record_is_signed(
self.master.ip, self.example_test_zone)
), "Zone is not signed"
backup_path = backup(self.master)
@@ -311,9 +313,10 @@ class BaseBackupAndRestoreWithDNSSEC(IntegrationTest):
self.master.run_command(['ipa-restore', backup_path],
stdin_text=dirman_password + '\nyes')
assert wait_until_record_is_signed(self.master.ip,
self.example_test_zone, self.log), ("Zone is not signed after "
"restore")
assert (
wait_until_record_is_signed(
self.master.ip, self.example_test_zone)
), "Zone is not signed after restore"
tasks.kinit_admin(self.master)
self.master.run_command([
@@ -322,8 +325,10 @@ class BaseBackupAndRestoreWithDNSSEC(IntegrationTest):
'--dnssec', 'true',
])
assert wait_until_record_is_signed(self.master.ip,
self.example2_test_zone, self.log), "A new zone is not signed"
assert (
wait_until_record_is_signed(
self.master.ip, self.example2_test_zone)
), "A new zone is not signed"
class TestBackupAndRestoreWithDNSSEC(BaseBackupAndRestoreWithDNSSEC):

View File

@@ -17,6 +17,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import tempfile
import shutil
@@ -34,6 +35,8 @@ from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_plugins.integration import tasks
from ipalib.constants import DOMAIN_LEVEL_0
logger = logging.getLogger(__name__)
_DEFAULT = object()
assert_error = tasks.assert_error
@@ -331,11 +334,11 @@ class CALessBase(IntegrationTest):
"""
with open(self.pem_filename) as f:
expected_cacrt = f.read()
self.log.debug('Expected /etc/ipa/ca.crt contents:\n%s',
logger.debug('Expected /etc/ipa/ca.crt contents:\n%s',
expected_cacrt)
expected_binary_cacrt = base64.b64decode(x509.strip_header(
expected_cacrt))
self.log.debug('Expected binary CA cert:\n%r',
logger.debug('Expected binary CA cert:\n%r',
expected_binary_cacrt)
for host in [self.master] + self.replicas:
# Check the LDAP entry
@@ -343,7 +346,7 @@ class CALessBase(IntegrationTest):
entry = ldap.get_entry(DN(('cn', 'CACert'), ('cn', 'ipa'),
('cn', 'etc'), host.domain.basedn))
cert_from_ldap = entry.single_value['cACertificate']
self.log.debug('CA cert from LDAP on %s:\n%r',
logger.debug('CA cert from LDAP on %s:\n%r',
host, cert_from_ldap)
assert cert_from_ldap == expected_binary_cacrt
@@ -354,10 +357,10 @@ class CALessBase(IntegrationTest):
for host in self.get_all_hosts():
# Check the cert PEM file
remote_cacrt = host.get_file_contents(paths.IPA_CA_CRT)
self.log.debug('%s:/etc/ipa/ca.crt contents:\n%s',
logger.debug('%s:/etc/ipa/ca.crt contents:\n%s',
host, remote_cacrt)
binary_cacrt = base64.b64decode(x509.strip_header(remote_cacrt))
self.log.debug('%s: Decoded /etc/ipa/ca.crt:\n%r',
logger.debug('%s: Decoded /etc/ipa/ca.crt:\n%r',
host, binary_cacrt)
assert expected_binary_cacrt == binary_cacrt
@@ -1182,7 +1185,7 @@ class TestIPACommands(CALessBase):
tasks.kinit_admin(cls.master)
cls.client_pem = ''.join(cls.get_pem('ca1/client').splitlines()[1:-1])
cls.log.debug('Client PEM:\n%r' % cls.client_pem)
logger.debug('Client PEM:\n%r', cls.client_pem)
cls.test_hostname = 'testhost.%s' % cls.master.domain.name
cls.test_service = 'test/%s' % cls.test_hostname

View File

@@ -1,6 +1,7 @@
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
import logging
import time
import dns.resolver
import dns.rrset
@@ -11,6 +12,8 @@ from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_plugins.integration import tasks
from ipapython.dnsutil import DNSName
logger = logging.getLogger(__name__)
IPA_DEFAULT_MASTER_SRV_REC = (
# srv record name, port
(DNSName(u'_ldap._tcp'), 389),
@@ -23,7 +26,7 @@ IPA_DEFAULT_MASTER_SRV_REC = (
)
def resolve_records_from_server(rname, rtype, nameserver, logger):
def resolve_records_from_server(rname, rtype, nameserver):
res = dns.resolver.Resolver()
res.nameservers = [nameserver]
res.lifetime = 10
@@ -86,7 +89,7 @@ class TestDNSLocations(IntegrationTest):
expected = _gen_expected_srv_rrset(
name_abs, port, expected_servers)
query = resolve_records_from_server(
name_abs, 'SRV', server_ip, self.log)
name_abs, 'SRV', server_ip)
assert expected == query, (
"Expected and received DNS data do not match on server "
"with IP: '{}' for name '{}' (expected:\n{}\ngot:\n{})".format(

View File

@@ -2,6 +2,8 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
import logging
import dns.dnssec
import dns.resolver
import dns.name
@@ -11,6 +13,8 @@ from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_plugins.integration import tasks
from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
test_zone = "dnssec.test."
test_zone_repl = "dnssec-replica.test."
root_zone = "."
@@ -19,7 +23,7 @@ example2_test_zone = "example2.test."
example3_test_zone = "example3.test."
def resolve_with_dnssec(nameserver, query, log, rtype="SOA"):
def resolve_with_dnssec(nameserver, query, rtype="SOA"):
res = dns.resolver.Resolver()
res.nameservers = [nameserver]
res.lifetime = 10 # wait max 10 seconds for reply
@@ -32,17 +36,18 @@ def resolve_with_dnssec(nameserver, query, log, rtype="SOA"):
ans = res.query(query, rtype)
return ans
def get_RRSIG_record(nameserver, query, log, rtype="SOA"):
ans = resolve_with_dnssec(nameserver, query, log, rtype=rtype)
def get_RRSIG_record(nameserver, query, rtype="SOA"):
ans = resolve_with_dnssec(nameserver, query, rtype=rtype)
return ans.response.find_rrset(
ans.response.answer, dns.name.from_text(query),
dns.rdataclass.IN, dns.rdatatype.RRSIG,
dns.rdatatype.from_text(rtype))
def is_record_signed(nameserver, query, log, rtype="SOA"):
def is_record_signed(nameserver, query, rtype="SOA"):
try:
get_RRSIG_record(nameserver, query, log, rtype=rtype)
get_RRSIG_record(nameserver, query, rtype=rtype)
except KeyError:
return False
except dns.exception.DNSException:
@@ -50,7 +55,7 @@ def is_record_signed(nameserver, query, log, rtype="SOA"):
return True
def wait_until_record_is_signed(nameserver, record, log, rtype="SOA",
def wait_until_record_is_signed(nameserver, record, rtype="SOA",
timeout=100):
"""
Returns True if record is signed, or False on timeout
@@ -61,11 +66,11 @@ def wait_until_record_is_signed(nameserver, record, log, rtype="SOA",
:param timeout:
:return: True if the record is signed, False on timeout
"""
log.info("Waiting for signed %s record of %s from server %s (timeout %s "
"sec)", rtype, record, nameserver, timeout)
logger.info("Waiting for signed %s record of %s from server %s (timeout "
"%s sec)", rtype, record, nameserver, timeout)
wait_until = time.time() + timeout
while time.time() < wait_until:
if is_record_signed(nameserver, record, log, rtype=rtype):
if is_record_signed(nameserver, record, rtype=rtype):
return True
time.sleep(1)
return False
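With the `log` parameter gone, callers of these DNSSEC helpers simply drop the argument, as the hunks below show. A hedged usage sketch of the new `wait_until_record_is_signed` signature, with placeholder nameserver and zone values:

# Hypothetical call site for the helper defined above; IP and zone are placeholders.
assert wait_until_record_is_signed(
    "192.0.2.1", "example.test.", timeout=100
), "Zone example.test. is not signed"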
@@ -107,12 +112,12 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master
assert wait_until_record_is_signed(
self.master.ip, test_zone, self.log, timeout=100
self.master.ip, test_zone, timeout=100
), "Zone %s is not signed (master)" % test_zone
# test replica
assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone, self.log, timeout=200
self.replicas[0].ip, test_zone, timeout=200
), "DNS zone %s is not signed (replica)" % test_zone
def test_if_zone_is_signed_replica(self):
@@ -127,20 +132,20 @@ class TestInstallDNSSECLast(IntegrationTest):
# test replica
assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone_repl, self.log, timeout=300
self.replicas[0].ip, test_zone_repl, timeout=300
), "Zone %s is not signed (replica)" % test_zone_repl
# we do not need to wait; on the master, zones should be signed faster
# than on replicas
assert wait_until_record_is_signed(
self.master.ip, test_zone_repl, self.log, timeout=5
self.master.ip, test_zone_repl, timeout=5
), "DNS zone %s is not signed (master)" % test_zone
def test_disable_reenable_signing_master(self):
dnskey_old = resolve_with_dnssec(self.master.ip, test_zone,
self.log, rtype="DNSKEY").rrset
rtype="DNSKEY").rrset
# disable DNSSEC signing of zone on master
args = [
@@ -154,12 +159,12 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master
assert not is_record_signed(
self.master.ip, test_zone, self.log
self.master.ip, test_zone
), "Zone %s is still signed (master)" % test_zone
# test replica
assert not is_record_signed(
self.replicas[0].ip, test_zone, self.log
self.replicas[0].ip, test_zone
), "DNS zone %s is still signed (replica)" % test_zone
# reenable DNSSEC signing
@@ -172,22 +177,22 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master
assert wait_until_record_is_signed(
self.master.ip, test_zone, self.log, timeout=100
self.master.ip, test_zone, timeout=100
), "Zone %s is not signed (master)" % test_zone
# test replica
assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone, self.log, timeout=200
self.replicas[0].ip, test_zone, timeout=200
), "DNS zone %s is not signed (replica)" % test_zone
dnskey_new = resolve_with_dnssec(self.master.ip, test_zone,
self.log, rtype="DNSKEY").rrset
rtype="DNSKEY").rrset
assert dnskey_old != dnskey_new, "DNSKEY should be different"
def test_disable_reenable_signing_replica(self):
dnskey_old = resolve_with_dnssec(self.replicas[0].ip, test_zone_repl,
self.log, rtype="DNSKEY").rrset
rtype="DNSKEY").rrset
# disable DNSSEC signing of zone on replica
args = [
@@ -201,12 +206,12 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master
assert not is_record_signed(
self.master.ip, test_zone_repl, self.log
self.master.ip, test_zone_repl
), "Zone %s is still signed (master)" % test_zone_repl
# test replica
assert not is_record_signed(
self.replicas[0].ip, test_zone_repl, self.log
self.replicas[0].ip, test_zone_repl
), "DNS zone %s is still signed (replica)" % test_zone_repl
# reenable DNSSEC signing
@@ -219,16 +224,16 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master
assert wait_until_record_is_signed(
self.master.ip, test_zone_repl, self.log, timeout=100
self.master.ip, test_zone_repl, timeout=100
), "Zone %s is not signed (master)" % test_zone_repl
# test replica
assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone_repl, self.log, timeout=200
self.replicas[0].ip, test_zone_repl, timeout=200
), "DNS zone %s is not signed (replica)" % test_zone_repl
dnskey_new = resolve_with_dnssec(self.replicas[0].ip, test_zone_repl,
self.log, rtype="DNSKEY").rrset
rtype="DNSKEY").rrset
assert dnskey_old != dnskey_new, "DNSKEY should be different"
@@ -292,12 +297,12 @@ class TestInstallDNSSECFirst(IntegrationTest):
self.master.run_command(args)
# test master
assert wait_until_record_is_signed(
self.master.ip, root_zone, self.log, timeout=100
self.master.ip, root_zone, timeout=100
), "Zone %s is not signed (master)" % root_zone
# test replica
assert wait_until_record_is_signed(
self.replicas[0].ip, root_zone, self.log, timeout=300
self.replicas[0].ip, root_zone, timeout=300
), "Zone %s is not signed (replica)" % root_zone
def test_chain_of_trust(self):
@@ -322,15 +327,15 @@ class TestInstallDNSSECFirst(IntegrationTest):
self.master.run_command(args)
# wait until zone is signed
assert wait_until_record_is_signed(
self.master.ip, example_test_zone, self.log, timeout=100
self.master.ip, example_test_zone, timeout=100
), "Zone %s is not signed (master)" % example_test_zone
# wait until zone is signed
assert wait_until_record_is_signed(
self.replicas[0].ip, example_test_zone, self.log, timeout=200
self.replicas[0].ip, example_test_zone, timeout=200
), "Zone %s is not signed (replica)" % example_test_zone
# GET DNSKEY records from zone
ans = resolve_with_dnssec(self.master.ip, example_test_zone, self.log,
ans = resolve_with_dnssec(self.master.ip, example_test_zone,
rtype="DNSKEY")
dnskey_rrset = ans.response.get_rrset(
ans.response.answer,
@@ -339,7 +344,7 @@ class TestInstallDNSSECFirst(IntegrationTest):
dns.rdatatype.DNSKEY)
assert dnskey_rrset, "No DNSKEY records received"
self.log.debug("DNSKEY records returned: %s", dnskey_rrset.to_text())
logger.debug("DNSKEY records returned: %s", dnskey_rrset.to_text())
# generate DS records
ds_records = []
@@ -351,7 +356,7 @@ class TestInstallDNSSECFirst(IntegrationTest):
assert ds_records, ("No KSK returned from the %s zone" %
example_test_zone)
self.log.debug("DS records for %s created: %r", example_test_zone,
logger.debug("DS records for %s created: %r", example_test_zone,
ds_records)
# add DS records to root zone
@@ -368,12 +373,12 @@ class TestInstallDNSSECFirst(IntegrationTest):
# wait until DS records are replicated
assert wait_until_record_is_signed(
self.replicas[0].ip, example_test_zone, self.log, timeout=100,
self.replicas[0].ip, example_test_zone, timeout=100,
rtype="DS"
), "No DS record of '%s' returned from replica" % example_test_zone
# extract DSKEY from root zone
ans = resolve_with_dnssec(self.master.ip, root_zone, self.log,
ans = resolve_with_dnssec(self.master.ip, root_zone,
rtype="DNSKEY")
dnskey_rrset = ans.response.get_rrset(ans.response.answer,
dns.name.from_text(root_zone),
@@ -381,7 +386,7 @@ class TestInstallDNSSECFirst(IntegrationTest):
dns.rdatatype.DNSKEY)
assert dnskey_rrset, "No DNSKEY records received"
self.log.debug("DNSKEY records returned: %s", dnskey_rrset.to_text())
logger.debug("DNSKEY records returned: %s", dnskey_rrset.to_text())
# export trust keys for root zone
root_key_rdatas = []
@@ -395,7 +400,7 @@ class TestInstallDNSSECFirst(IntegrationTest):
root_keys_rrset = dns.rrset.from_rdata_list(dnskey_rrset.name,
dnskey_rrset.ttl,
root_key_rdatas)
self.log.debug("Root zone trusted key: %s", root_keys_rrset.to_text())
logger.debug("Root zone trusted key: %s", root_keys_rrset.to_text())
# set trusted key for our root zone
self.master.put_file_contents(paths.DNSSEC_TRUSTED_KEY,
@@ -459,15 +464,15 @@ class TestMigrateDNSSECMaster(IntegrationTest):
# wait until zone is signed
assert wait_until_record_is_signed(
self.master.ip, example_test_zone, self.log, timeout=100
self.master.ip, example_test_zone, timeout=100
), "Zone %s is not signed (master)" % example_test_zone
# wait until zone is signed
assert wait_until_record_is_signed(
self.replicas[0].ip, example_test_zone, self.log, timeout=200
self.replicas[0].ip, example_test_zone, timeout=200
), "Zone %s is not signed (replica)" % example_test_zone
dnskey_old = resolve_with_dnssec(self.master.ip, example_test_zone,
self.log, rtype="DNSKEY").rrset
rtype="DNSKEY").rrset
# migrate dnssec master to replica
args = [
@@ -495,16 +500,16 @@ class TestMigrateDNSSECMaster(IntegrationTest):
# wait until zone is signed
assert wait_until_record_is_signed(
self.master.ip, example_test_zone, self.log, timeout=100
self.master.ip, example_test_zone, timeout=100
), "Zone %s is not signed after migration (master)" % example_test_zone
# wait until zone is signed
assert wait_until_record_is_signed(
self.replicas[0].ip, example_test_zone, self.log, timeout=200
self.replicas[0].ip, example_test_zone, timeout=200
), "Zone %s is not signed after migration (replica)" % example_test_zone
# test if dnskey are the same
dnskey_new = resolve_with_dnssec(self.master.ip, example_test_zone,
self.log, rtype="DNSKEY").rrset
rtype="DNSKEY").rrset
assert dnskey_old == dnskey_new, "DNSKEY should be the same"
# add test zone
@@ -515,12 +520,12 @@ class TestMigrateDNSSECMaster(IntegrationTest):
self.replicas[0].run_command(args)
# wait until zone is signed
assert wait_until_record_is_signed(
self.replicas[0].ip, example2_test_zone, self.log, timeout=100
self.replicas[0].ip, example2_test_zone, timeout=100
), ("Zone %s is not signed after migration (replica - dnssec master)"
% example2_test_zone)
# wait until zone is signed
assert wait_until_record_is_signed(
self.master.ip, example2_test_zone, self.log, timeout=200
self.master.ip, example2_test_zone, timeout=200
), ("Zone %s is not signed after migration (master)"
% example2_test_zone)
@@ -530,12 +535,12 @@ class TestMigrateDNSSECMaster(IntegrationTest):
# test if original zones are signed on new replica
# wait until zone is signed
assert wait_until_record_is_signed(
self.replicas[1].ip, example_test_zone, self.log, timeout=200
self.replicas[1].ip, example_test_zone, timeout=200
), ("Zone %s is not signed (new replica)"
% example_test_zone)
# wait until zone is signed
assert wait_until_record_is_signed(
self.replicas[1].ip, example2_test_zone, self.log, timeout=200
self.replicas[1].ip, example2_test_zone, timeout=200
), ("Zone %s is not signed (new replica)"
% example2_test_zone)
@@ -547,15 +552,15 @@ class TestMigrateDNSSECMaster(IntegrationTest):
self.replicas[1].run_command(args)
# wait until zone is signed
assert wait_until_record_is_signed(
self.replicas[1].ip, example3_test_zone, self.log, timeout=200
self.replicas[1].ip, example3_test_zone, timeout=200
), ("Zone %s is not signed (new replica)"
% example3_test_zone)
assert wait_until_record_is_signed(
self.replicas[0].ip, example3_test_zone, self.log, timeout=200
self.replicas[0].ip, example3_test_zone, timeout=200
), ("Zone %s is not signed (replica)"
% example3_test_zone)
# wait until zone is signed
assert wait_until_record_is_signed(
self.master.ip, example3_test_zone, self.log, timeout=200
self.master.ip, example3_test_zone, timeout=200
), ("Zone %s is not signed (master)"
% example3_test_zone)

View File

@@ -16,6 +16,8 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import subprocess
from ipaplatform.paths import paths
@@ -24,6 +26,8 @@ import pytest
from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_plugins.integration import tasks
logger = logging.getLogger(__name__)
CLIENT_KEYTAB = paths.KRB5_KEYTAB
@@ -253,7 +257,8 @@ class TestForcedClientReenrollment(IntegrationTest):
assert sshfp_record, 'SSHFP record not found'
sshfp_record = set(sshfp_record.split(', '))
self.log.debug("SSHFP record for host %s: %s", client_host, str(sshfp_record))
logger.debug("SSHFP record for host %s: %s",
client_host, str(sshfp_record))
return sshfp_record

View File

@@ -206,18 +206,17 @@ ipa_class_members = {
'Method',
'Updater',
'Advice',
] + LOGGING_ATTRS,
],
'ipalib.util.ForwarderValidationError': [
'msg',
],
'ipaserver.install.ldapupdate.LDAPUpdate': LOGGING_ATTRS,
'ipaserver.plugins.dns.DNSRecord': [
'validatedns',
'normalizedns',
],
'ipaserver.rpcserver.KerberosSession': [
fake_api,
] + LOGGING_ATTRS,
],
'ipatests.test_integration.base.IntegrationTest': [
'domain',
{'master': [