logging: remove object-specific loggers

Remove all object-specific loggers, with the exception of `Plugin.log`,
which is now deprecated. Replace affected logger calls with module-level
logger calls.

Deprecate object-specific loggers in `ipa_log_manager.get_logger`.
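
For illustration, a minimal sketch of the pattern every converted module
follows (`ExampleTool` is a made-up class, not part of this patch):

    import logging

    logger = logging.getLogger(__name__)


    class ExampleTool(object):
        def run(self):
            # lazy %-formatting: arguments are handed to the logger
            # instead of being interpolated into the message up front
            logger.debug("running %s", type(self).__name__)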

Reviewed-By: Martin Basti <mbasti@redhat.com>
Author:    Jan Cholasta
Date:      2017-05-23 18:35:57 +02:00
Committer: Martin Basti
Commit:    ffadcb0414
Parent:    9d19654cbd

72 changed files with 1095 additions and 783 deletions

View File

@@ -17,6 +17,7 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
+import logging
 import os
 import tempfile
 import shutil
@@ -34,6 +35,8 @@ from ipaplatform.tasks import tasks
 from ipalib import api, errors, x509
 from ipalib.constants import IPA_CA_NICKNAME, RENEWAL_CA_NAME
+logger = logging.getLogger(__name__)
 class CertUpdate(admintool.AdminTool):
     command_name = 'ipa-certupdate'
@@ -103,10 +106,9 @@ class CertUpdate(admintool.AdminTool):
                 # pylint: disable=import-error,ipa-forbidden-import
                 from ipaserver.install import cainstance
                 # pylint: enable=import-error,ipa-forbidden-import
-                cainstance.add_lightweight_ca_tracking_requests(
-                    self.log, lwcas)
+                cainstance.add_lightweight_ca_tracking_requests(lwcas)
             except Exception:
-                self.log.exception(
+                logger.exception(
                     "Failed to add lightweight CA tracking requests")
         self.update_client(certs)
@@ -124,8 +126,8 @@ class CertUpdate(admintool.AdminTool):
             try:
                 ipa_db.delete_cert(nickname)
             except ipautil.CalledProcessError as e:
-                self.log.error("Failed to remove %s from %s: %s",
-                               nickname, ipa_db.secdir, e)
+                logger.error("Failed to remove %s from %s: %s",
+                             nickname, ipa_db.secdir, e)
                 break
         self.update_db(ipa_db.secdir, certs)
@@ -153,7 +155,7 @@ class CertUpdate(admintool.AdminTool):
         if request_id is not None:
             timeout = api.env.startup_timeout + 60
-            self.log.debug("resubmitting certmonger request '%s'", request_id)
+            logger.debug("resubmitting certmonger request '%s'", request_id)
             certmonger.resubmit_request(
                 request_id, ca='dogtag-ipa-ca-renew-agent-reuse', profile='')
             try:
@@ -168,7 +170,7 @@ class CertUpdate(admintool.AdminTool):
                     "Error resubmitting certmonger request '%s', "
                     "please check the request manually" % request_id)
-            self.log.debug("modifying certmonger request '%s'", request_id)
+            logger.debug("modifying certmonger request '%s'", request_id)
             certmonger.modify(request_id, ca='dogtag-ipa-ca-renew-agent')
         self.update_file(paths.CA_CRT, certs)
@@ -179,7 +181,7 @@ class CertUpdate(admintool.AdminTool):
         try:
             x509.write_certificate_list(certs, filename)
         except Exception as e:
-            self.log.error("failed to update %s: %s", filename, e)
+            logger.error("failed to update %s: %s", filename, e)
     def update_db(self, path, certs):
         db = certdb.NSSDatabase(path)
@@ -189,5 +191,5 @@ class CertUpdate(admintool.AdminTool):
             try:
                 db.add_cert(cert, nickname, trust_flags)
             except ipautil.CalledProcessError as e:
-                self.log.error(
+                logger.error(
                     "failed to update %s in %s: %s", nickname, path, e)

View File

@@ -527,7 +527,7 @@ the following:
 Also note that your ``execute()`` method should not contain any ``print``
 statements or otherwise cause any output on ``sys.stdout``. Your command can
-(and should) produce log messages by using ``self.log`` (see below).
+(and should) produce log messages by using a module-level logger (see below).
 To learn more about XML-RPC (XML Remote Procedure Call), see:
@@ -670,18 +670,19 @@ See the `ipalib.cli.textui` plugin for a description of its methods.
 Logging from your plugin
 ------------------------
-After `plugable.Plugin.__init__()` is called, your plugin will have a
-``self.log`` attribute. Plugins should only log through this attribute.
+Plugins should log through a module-level logger.
 For example:
+>>> import logging
+>>> logger = logging.getLogger(__name__)
 >>> class paint_house(Command):
 ...
 ...     takes_args = 'color'
 ...
 ...     def execute(self, color, **options):
-...         """Uses self.log.error()"""
+...         """Uses logger.error()"""
 ...         if color not in ('red', 'blue', 'green'):
-...             self.log.error("I don't have %s paint!", color)  # Log error
+...             logger.error("I don't have %s paint!", color)  # Log error
 ...             return
 ...         return 'I painted the house %s.' % color
 ...
@@ -691,7 +692,7 @@ Some basic knowledge of the Python ``logging`` module might be helpful. See:
     http://docs.python.org/library/logging.html
 The important thing to remember is that your plugin should not configure
-logging itself, but should instead simply use the ``self.log`` logger.
+logging itself, but should instead simply use the module-level logger.
 Also see the `plugable.API.bootstrap()` method for details on how the logging
 is configured.

View File

@@ -21,6 +21,7 @@
 Base classes for all backed-end plugins.
 """
+import logging
 import threading
 import os
@@ -28,6 +29,8 @@ from ipalib import plugable
 from ipalib.errors import PublicError, InternalError, CommandError
 from ipalib.request import context, Connection, destroy_context
+logger = logging.getLogger(__name__)
 class Backend(plugable.Plugin):
     """
@@ -66,7 +69,7 @@ class Connectible(Backend):
         conn = self.create_connection(*args, **kw)
         setattr(context, self.id, Connection(conn, self.disconnect))
         assert self.conn is conn
-        self.debug('Created connection context.%s' % self.id)
+        logger.debug('Created connection context.%s', self.id)
     def create_connection(self, *args, **kw):
         raise NotImplementedError('%s.create_connection()' % self.id)
@@ -82,7 +85,7 @@ class Connectible(Backend):
             )
         self.destroy_connection()
         delattr(context, self.id)
-        self.debug('Destroyed connection context.%s' % self.id)
+        logger.debug('Destroyed connection context.%s', self.id)
     def destroy_connection(self):
         raise NotImplementedError('%s.destroy_connection()' % self.id)
@@ -140,7 +143,7 @@ class Executioner(Backend):
             except PublicError as e:
                 error = e
             except Exception as e:
-                self.exception(
+                logger.exception(
                     'non-public: %s: %s', e.__class__.__name__, str(e)
                 )
                 error = InternalError()

View File

@@ -20,6 +20,8 @@
 """
 Base classes for all front-end plugins.
 """
+import logging
 import six
 from ipapython.version import API_VERSION
@@ -41,6 +43,8 @@ from ipalib.util import classproperty, json_serialize
 if six.PY3:
     unicode = str
+logger = logging.getLogger(__name__)
 RULE_FLAG = 'validation_rule'
 def rule(obj):
@@ -459,14 +463,14 @@ class Command(HasParam):
             self.add_message(
                 messages.VersionMissing(server_version=self.api_version))
         params = self.args_options_2_params(*args, **options)
-        self.debug(
+        logger.debug(
             'raw: %s(%s)', self.name, ', '.join(self._repr_iter(**params))
         )
         if self.api.env.in_server:
             params.update(self.get_default(**params))
         params = self.normalize(**params)
         params = self.convert(**params)
-        self.debug(
+        logger.debug(
             '%s(%s)', self.name, ', '.join(self._repr_iter(**params))
         )
         if self.api.env.in_server:
@@ -1463,7 +1467,7 @@ class Updater(Plugin):
         raise NotImplementedError('%s.execute()' % self.name)
     def __call__(self, **options):
-        self.debug(
+        logger.debug(
             'raw: %s', self.name
         )

View File

@@ -34,6 +34,7 @@ from __future__ import absolute_import
 from decimal import Decimal
 import datetime
+import logging
 import os
 import locale
 import base64
@@ -91,6 +92,8 @@ else:
 if six.PY3:
     unicode = str
+logger = logging.getLogger(__name__)
 COOKIE_NAME = 'ipa_session'
 CCACHE_COOKIE_KEY = 'X-IPA-Session-Cookie'
@@ -889,8 +892,7 @@ class RPCClient(Connectible):
             cookie_string = read_persistent_client_session_data(principal)
             cookie_string = cookie_string.decode('utf-8')
         except Exception as e:
-            self.log.debug('Error reading client session data: {err}'
-                           .format(err=e))
+            logger.debug('Error reading client session data: %s', e)
             return None
         # Search for the session cookie within the cookie string
@@ -899,9 +901,9 @@ class RPCClient(Connectible):
                 cookie_string, COOKIE_NAME,
                 timestamp=datetime.datetime.utcnow())
         except Exception as e:
-            self.log.debug(
-                'Error retrieving cookie from the persistent storage: {err}'
-                .format(err=e))
+            logger.debug(
+                'Error retrieving cookie from the persistent storage: %s',
+                e)
             return None
         return session_cookie
@@ -934,33 +936,37 @@ class RPCClient(Connectible):
         session_cookie = self.get_session_cookie_from_persistent_storage(principal)
         if session_cookie is None:
-            self.log.debug("failed to find session_cookie in persistent storage for principal '%s'",
-                           principal)
+            logger.debug("failed to find session_cookie in persistent storage "
+                         "for principal '%s'",
+                         principal)
             return original_url
         else:
-            self.debug("found session_cookie in persistent storage for principal '%s', cookie: '%s'",
-                       principal, session_cookie)
+            logger.debug("found session_cookie in persistent storage for "
+                         "principal '%s', cookie: '%s'",
+                         principal, session_cookie)
         # Decide if we should send the cookie to the server
         try:
             session_cookie.http_return_ok(original_url)
         except Cookie.Expired as e:
-            self.debug("deleting session data for principal '%s': %s", principal, e)
+            logger.debug("deleting session data for principal '%s': %s",
+                         principal, e)
             try:
                 delete_persistent_client_session_data(principal)
             except Exception as e:
                 pass
             return original_url
         except Cookie.URLMismatch as e:
-            self.debug("not sending session cookie, URL mismatch: %s", e)
+            logger.debug("not sending session cookie, URL mismatch: %s", e)
             return original_url
         except Exception as e:
-            self.error("not sending session cookie, unknown error: %s", e)
+            logger.error("not sending session cookie, unknown error: %s", e)
            return original_url
         # O.K. session_cookie is valid to be returned, stash it away where it will will
         # get included in a HTTP Cookie headed sent to the server.
-        self.log.debug("setting session_cookie into context '%s'", session_cookie.http_cookie())
+        logger.debug("setting session_cookie into context '%s'",
+                     session_cookie.http_cookie())
         setattr(context, 'session_cookie', session_cookie.http_cookie())
         # Form the session URL by substituting the session path into the original URL
@@ -1023,7 +1029,7 @@ class RPCClient(Connectible):
             transport_class = LanguageAwareTransport
         proxy_kw['transport'] = transport_class(
             protocol=self.protocol, service='HTTP', ccache=ccache)
-        self.log.info('trying %s' % url)
+        logger.info('trying %s', url)
         setattr(context, 'request_url', url)
         serverproxy = self.server_proxy_class(url, **proxy_kw)
         if len(urls) == 1:
@@ -1066,7 +1072,7 @@ class RPCClient(Connectible):
                     if not fallback:
                         raise
                     else:
-                        self.log.info(
+                        logger.info(
                             'Connection to %s failed with %s', url, e)
                     # try the next url
                     break
@@ -1074,7 +1080,7 @@ class RPCClient(Connectible):
                     if not fallback:
                         raise
                     else:
-                        self.log.info(
+                        logger.info(
                             'Connection to %s failed with %s', url, e)
                     # try the next url
                     break
@@ -1113,14 +1119,14 @@ class RPCClient(Connectible):
         # each time should we be getting UNAUTHORIZED error from the server
         max_tries = 5
         for try_num in range(0, max_tries):
-            self.log.info("[try %d]: Forwarding '%s' to %s server '%s'",
-                          try_num+1, name, self.protocol, server)
+            logger.info("[try %d]: Forwarding '%s' to %s server '%s'",
+                        try_num+1, name, self.protocol, server)
             try:
                 return self._call_command(command, params)
             except Fault as e:
                 e = decode_fault(e)
-                self.debug('Caught fault %d from server %s: %s', e.faultCode,
-                           server, e.faultString)
+                logger.debug('Caught fault %d from server %s: %s', e.faultCode,
+                             server, e.faultString)
                 if e.faultCode in errors_by_code:
                     error = errors_by_code[e.faultCode]
                     raise error(message=e.faultString)
@@ -1143,8 +1149,8 @@ class RPCClient(Connectible):
                     except Exception as e:
                         # This shouldn't happen if we have a session
                         # but it isn't fatal.
-                        self.debug("Error trying to remove persisent session "
-                                   "data: {err}".format(err=e))
+                        logger.debug("Error trying to remove persisent "
+                                     "session data: %s", e)
                     # Create a new serverproxy with the non-session URI
                     serverproxy = self.create_connection(

View File

@@ -26,6 +26,7 @@ from __future__ import (
     print_function,
 )
+import logging
 import os
 import socket
 import re
@@ -64,6 +65,8 @@ from ipapython.ipa_log_manager import root_logger
 if six.PY3:
     unicode = str
+logger = logging.getLogger(__name__)
 def json_serialize(obj):
     if isinstance(obj, (list, tuple)):
@@ -697,10 +700,9 @@ def _log_response(log, e):
     :param e: DNSException
     """
     assert isinstance(e, DNSException)
-    if log is not None:
-        response = getattr(e, 'kwargs', {}).get('response')
-        if response:
-            log.debug("DNSException: %s; server response: %s", e, response)
+    response = getattr(e, 'kwargs', {}).get('response')
+    if response:
+        log.debug("DNSException: %s; server response: %s", e, response)
 def _resolve_record(owner, rtype, nameserver_ip=None, edns0=False,
@@ -736,7 +738,7 @@ def _resolve_record(owner, rtype, nameserver_ip=None, edns0=False,
     return res.query(owner, rtype)
-def _validate_edns0_forwarder(owner, rtype, ip_addr, log=None, timeout=10):
+def _validate_edns0_forwarder(owner, rtype, ip_addr, log=logger, timeout=10):
     """
     Validate if forwarder supports EDNS0
@@ -760,7 +762,7 @@ def _validate_edns0_forwarder(owner, rtype, ip_addr, log=None, timeout=10):
             error=e)
-def validate_dnssec_global_forwarder(ip_addr, log=None, timeout=10):
+def validate_dnssec_global_forwarder(ip_addr, log=logger, timeout=10):
     """Test DNS forwarder properties. against root zone.
     Global forwarders should be able return signed root zone
@@ -793,17 +795,16 @@ def validate_dnssec_global_forwarder(ip_addr, log=None, timeout=10):
     raise DNSSECSignatureMissingError(owner=owner, rtype=rtype, ip=ip_addr)
-def validate_dnssec_zone_forwarder_step1(ip_addr, fwzone, log=None, timeout=10):
+def validate_dnssec_zone_forwarder_step1(ip_addr, fwzone, timeout=10):
     """
     Only forwarders in forward zones can be validated in this way
     :raise UnresolvableRecordError: record cannot be resolved
     :raise EDNS0UnsupportedError: ENDS0 is not supported by forwarder
     """
-    _validate_edns0_forwarder(fwzone, "SOA", ip_addr, log=log, timeout=timeout)
+    _validate_edns0_forwarder(fwzone, "SOA", ip_addr, timeout=timeout)
-def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, log=None,
-                                         timeout=10):
+def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, timeout=10):
     """
     This step must be executed after forwarders are added into LDAP, and only
     when we are sure the forwarders work.
@@ -813,6 +814,7 @@ def validate_dnssec_zone_forwarder_step2(ipa_ip_addr, fwzone, log=None,
     :raise UnresolvableRecordError: record cannot be resolved
     :raise DNSSECValidationError: response from forwarder is not DNSSEC valid
     """
+    log = logger
     rtype = "SOA"
     try:
         ans_cd = _resolve_record(fwzone, rtype, nameserver_ip=ipa_ip_addr,

View File

@@ -31,6 +31,9 @@ from optparse import OptionGroup  # pylint: disable=deprecated-module
 from ipapython import version
 from ipapython import config
 from ipapython import ipa_log_manager
+from ipapython.ipa_log_manager import standard_logging_setup
+logger = logging.getLogger(__name__)
 class ScriptError(Exception):
@@ -88,7 +91,6 @@ class AdminTool(object):
     usage = None
     description = None
-    log = None
     _option_parsers = dict()
     @classmethod
@@ -226,7 +228,7 @@ class AdminTool(object):
         - a plain print for things that should not be log (for example,
           interactive prompting)
-        To log, use `self.log.info()`, `self.log.warning()`, etc.
+        To log, use a module-level logger.
         Logging to file is only set up after option validation and prompting;
         before that, all output will go to the console only.
@@ -258,14 +260,13 @@ class AdminTool(object):
             verbose = False
         else:
             verbose = True
-        ipa_log_manager.standard_logging_setup(
+        standard_logging_setup(
             log_file_name, console_format=console_format,
             filemode=log_file_mode, debug=debug, verbose=verbose)
-        self.log = ipa_log_manager.log_mgr.get_logger(self)
         if log_file_name:
-            self.log.debug('Logging to %s' % log_file_name)
+            logger.debug('Logging to %s', log_file_name)
         elif not no_file:
-            self.log.debug('Not logging to a file')
+            logger.debug('Not logging to a file')
     def handle_error(self, exception):
@@ -290,20 +291,20 @@ class AdminTool(object):
         assumed to have run successfully, and the return value is used as the
         SystemExit code.
         """
-        self.log.debug('%s was invoked with arguments %s and options: %s',
-                       self.command_name, self.args, self.safe_options)
+        logger.debug('%s was invoked with arguments %s and options: %s',
+                     self.command_name, self.args, self.safe_options)
-        self.log.debug('IPA version %s' % version.VENDOR_VERSION)
+        logger.debug('IPA version %s', version.VENDOR_VERSION)
     def log_failure(self, error_message, return_value, exception, backtrace):
-        self.log.debug(''.join(traceback.format_tb(backtrace)))
+        logger.debug('%s', ''.join(traceback.format_tb(backtrace)))
-        self.log.debug('The %s command failed, exception: %s: %s',
-                       self.command_name, type(exception).__name__, exception)
+        logger.debug('The %s command failed, exception: %s: %s',
+                     self.command_name, type(exception).__name__, exception)
         if error_message:
-            self.log.error(error_message)
+            logger.error('%s', error_message)
         message = "The %s command failed." % self.command_name
         if self.log_file_name:
             message += " See %s for more information" % self.log_file_name
-        self.log.error(message)
+        logger.error('%s', message)
     def log_success(self):
-        self.log.info('The %s command was successful', self.command_name)
+        logger.info('The %s command was successful', self.command_name)

View File

@@ -27,8 +27,6 @@ import six
 from six.moves.urllib.parse import urlparse
 # pylint: enable=import-error
-from ipapython.ipa_log_manager import log_mgr
 '''
 Core Python has two cookie libraries, Cookie.py targeted to server
 side and cookielib.py targeted to client side. So why this module and
@@ -354,9 +352,6 @@ class Cookie(object):
     def __init__(self, key, value, domain=None, path=None, max_age=None, expires=None,
                  secure=None, httponly=None, timestamp=None):
-        log_mgr.get_logger(self, True)
         self.key = key
         self.value = value
         self.domain = domain

View File

@@ -9,12 +9,14 @@ Command line support.
 import collections
 import enum
 import functools
+import logging
 import optparse  # pylint: disable=deprecated-module
 import signal
 import six
-from ipapython import admintool, ipa_log_manager
+from ipapython import admintool
+from ipapython.ipa_log_manager import standard_logging_setup
 from ipapython.ipautil import CheckedIPAddress, private_ccache
 from . import core, common
@@ -26,6 +28,8 @@ if six.PY3:
     NoneType = type(None)
+logger = logging.getLogger(__name__)
 def _get_usage(configurable_class):
     usage = '%prog [options]'
@@ -285,16 +289,15 @@ class ConfigureTool(admintool.AdminTool):
             log_file_name = self.options.log_file
         else:
             log_file_name = self.log_file_name
-        ipa_log_manager.standard_logging_setup(
+        standard_logging_setup(
             log_file_name,
             verbose=self.verbose,
             debug=self.options.verbose,
             console_format=self.console_format)
-        self.log = ipa_log_manager.log_mgr.get_logger(self)
         if log_file_name:
-            self.log.debug('Logging to %s' % log_file_name)
+            logger.debug('Logging to %s', log_file_name)
         elif not no_file:
-            self.log.debug('Not logging to a file')
+            logger.debug('Not logging to a file')
     def run(self):
         kwargs = {}

View File

@@ -20,6 +20,7 @@ import logging
 import os
 import re
 import time
+import warnings
 import sys
 import six
@@ -56,14 +57,57 @@ LOGGING_FORMAT_STANDARD_CONSOLE = '%(name)-12s: %(levelname)-8s %(message)s'
 LOGGING_FORMAT_STANDARD_FILE = '%(asctime)s %(levelname)s %(message)s'
+class _DeprecatedLogger(object):
+    def __init__(self, logger, name):
+        self._logger = logger
+        self._name = name
+    def _warn(self):
+        warnings.warn(
+            "{} is deprecated, use a module-level logger".format(self._name),
+            DeprecationWarning)
+    def debug(self, *args, **kwargs):
+        self._warn()
+        self._logger.debug(*args, **kwargs)
+    def info(self, *args, **kwargs):
+        self._warn()
+        self._logger.info(*args, **kwargs)
+    def warning(self, *args, **kwargs):
+        self._warn()
+        self._logger.warning(*args, **kwargs)
+    def error(self, *args, **kwargs):
+        self._warn()
+        self._logger.error(*args, **kwargs)
+    def critical(self, *args, **kwargs):
+        self._warn()
+        self._logger.critical(*args, **kwargs)
+    def exception(self, *args, **kwargs):
+        self._warn()
+        self._logger.exception(*args, **kwargs)
 def get_logger(who, bind_logger_names=False):
     if isinstance(who, six.string_types):
         logger_name = who
     else:
-        logger_name = '%s.%s' % (who.__module__, who.__class__.__name__)
+        caller_globals = sys._getframe(1).f_globals
+        logger_name = caller_globals.get('__name__', '__main__')
+        if logger_name == '__main__':
+            logger_name = caller_globals.get('__file__', logger_name)
+            logger_name = os.path.basename(logger_name)
     logger = logging.getLogger(logger_name)
+    if not isinstance(who, six.string_types):
+        obj_name = '%s.%s' % (who.__module__, who.__class__.__name__)
+        logger = _DeprecatedLogger(logger, obj_name)
     if bind_logger_names:
         method = 'log'
         if hasattr(who, method):
View File

@@ -20,6 +20,7 @@
 #
 import binascii
+import logging
 import time
 import datetime
 from decimal import Decimal
@@ -44,7 +45,6 @@ from ipalib import errors, _
 from ipalib.constants import LDAP_GENERALIZED_TIME_FORMAT
 # pylint: enable=ipa-forbidden-import
 from ipapython.ipautil import format_netloc, CIDict
-from ipapython.ipa_log_manager import log_mgr
 from ipapython.dn import DN
 from ipapython.dnsutil import DNSName
 from ipapython.kerberos import Principal
@@ -52,6 +52,8 @@ from ipapython.kerberos import Principal
 if six.PY3:
     unicode = str
+logger = logging.getLogger(__name__)
 # Global variable to define SASL auth
 SASL_GSSAPI = ldap.sasl.sasl({}, 'GSSAPI')
 SASL_GSS_SPNEGO = ldap.sasl.sasl({}, 'GSS-SPNEGO')
@@ -89,7 +91,6 @@ class SchemaCache(object):
     '''
     def __init__(self):
-        self.log = log_mgr.get_logger(self)
         self.servers = {}
     def get_schema(self, url, conn, force_update=False):
@@ -113,7 +114,7 @@ class SchemaCache(object):
         return server_schema.schema
     def flush(self, url):
-        self.log.debug('flushing %s from SchemaCache', url)
+        logger.debug('flushing %s from SchemaCache', url)
         try:
             del self.servers[url]
         except KeyError:
@@ -134,7 +135,7 @@ class SchemaCache(object):
         """
         assert conn is not None
-        self.log.debug(
+        logger.debug(
             'retrieving schema for SchemaCache url=%s conn=%s', url, conn)
         try:
@@ -144,7 +145,7 @@ class SchemaCache(object):
         except ldap.NO_SUCH_OBJECT:
             # try different location for schema
             # openldap has schema located in cn=subschema
-            self.log.debug('cn=schema not found, fallback to cn=subschema')
+            logger.debug('cn=schema not found, fallback to cn=subschema')
             schema_entry = conn.search_s('cn=subschema', ldap.SCOPE_BASE,
                                          attrlist=['attributetypes', 'objectclasses'])[0]
         except ldap.SERVER_DOWN:
@@ -721,7 +722,6 @@ class LDAPClient(object):
         self._cacert = cacert
         self._sasl_nocanon = sasl_nocanon
-        self.log = log_mgr.get_logger(self)
         self._has_schema = False
         self._schema = None
@@ -896,7 +896,7 @@ class LDAPClient(object):
                 return target_type(val)
             except Exception:
                 msg = 'unable to convert the attribute %r value %r to type %s' % (attr, val, target_type)
-                self.log.error(msg)
+                logger.error('%s', msg)
                 raise ValueError(msg)
         elif isinstance(val, list):
             return [self.decode(m, attr) for m in val]
@@ -933,7 +933,7 @@ class LDAPClient(object):
             if original_dn is None:
                 log_msg = 'Referral entry ignored: {ref}'\
                           .format(ref=str(original_attrs))
-                self.log.debug(log_msg)
+                logger.debug('%s', log_msg)
                 continue
@@ -946,7 +946,7 @@ class LDAPClient(object):
             ipa_result.append(ipa_entry)
         if _debug_log_ldap:
-            self.log.debug('ldap.result: %s', ipa_result)
+            logger.debug('ldap.result: %s', ipa_result)
         return ipa_result
     @contextlib.contextmanager
@@ -1019,8 +1019,8 @@ class LDAPClient(object):
             if 'NOT_ALLOWED_TO_DELEGATE' in info:
                 raise errors.ACIError(
                     info="KDC returned NOT_ALLOWED_TO_DELEGATE")
-            self.log.debug(
-                'Unhandled LDAPError: %s: %s' % (type(e).__name__, str(e)))
+            logger.debug(
+                'Unhandled LDAPError: %s: %s', type(e).__name__, str(e))
             raise errors.DatabaseError(desc=desc, info=info)
     @staticmethod
@@ -1317,10 +1317,9 @@ class LDAPClient(object):
             try:
                 self.handle_truncated_result(truncated)
             except errors.LimitsExceeded as e:
-                self.log.error(
-                    "{} while getting entries (base DN: {}, filter: {})".format(
-                        e, base_dn, filter
-                    )
+                logger.error(
+                    "%s while getting entries (base DN: %s, filter: %s)",
+                    e, base_dn, filter
                 )
                 raise
@@ -1429,7 +1428,7 @@ class LDAPClient(object):
                     serverctrls=sctrls, timeout=time_limit,
                     sizelimit=size_limit)
             except ldap.LDAPError as e:
-                self.log.warning(
+                logger.warning(
                     "Error cancelling paged search: %s", e)
                 cookie = ''

View File

@@ -507,7 +507,7 @@ class IpaAdvise(admintool.AdminTool):
             advise_api.finalize()
             if not self.options.verbose:
                 # Do not print connection information by default
-                logger_name = r'ipalib\.plugins\.rpcclient'
+                logger_name = r'ipalib\.rpc'
                 root_logger.addFilter(Filter(logger_name, logging.WARNING))
             # With no argument, print the list out and exit

View File

@@ -3,6 +3,8 @@
 #
 from datetime import datetime
+import logging
 import dns.name
 import errno
 import os
@@ -11,11 +13,13 @@ import stat
 import ipalib.constants
 from ipapython.dn import DN
-from ipapython import ipa_log_manager, ipautil
+from ipapython import ipautil
 from ipaplatform.paths import paths
 from ipaserver.dnssec.temp import TemporaryDirectory
+logger = logging.getLogger(__name__)
 time_bindfmt = '%Y%m%d%H%M%S'
 # this daemon should run under ods:named user:group
@@ -31,14 +35,13 @@ class BINDMgr(object):
     """
     def __init__(self, api):
         self.api = api
-        self.log = ipa_log_manager.log_mgr.get_logger(self)
         self.ldap_keys = {}
         self.modified_zones = set()
     def notify_zone(self, zone):
         cmd = ['rndc', 'sign', zone.to_text()]
         result = ipautil.run(cmd, capture_output=True)
-        self.log.info('%s', result.output_log)
+        logger.info('%s', result.output_log)
     def dn2zone_name(self, dn):
         """cn=KSK-20140813162153Z-cede9e182fc4af76c4bddbc19123a565,cn=keys,idnsname=test,cn=dns,dc=ipa,dc=example"""
@@ -83,21 +86,24 @@ class BINDMgr(object):
             self.modified_zones.add(zone)
         zone_keys = self.ldap_keys.setdefault(zone, {})
         if op == 'add':
-            self.log.info('Key metadata %s added to zone %s' % (attrs['dn'], zone))
+            logger.info('Key metadata %s added to zone %s',
+                        attrs['dn'], zone)
             zone_keys[uuid] = attrs
         elif op == 'del':
-            self.log.info('Key metadata %s deleted from zone %s' % (attrs['dn'], zone))
+            logger.info('Key metadata %s deleted from zone %s',
+                        attrs['dn'], zone)
             zone_keys.pop(uuid)
         elif op == 'mod':
-            self.log.info('Key metadata %s updated in zone %s' % (attrs['dn'], zone))
+            logger.info('Key metadata %s updated in zone %s',
+                        attrs['dn'], zone)
             zone_keys[uuid] = attrs
     def install_key(self, zone, uuid, attrs, workdir):
         """Run dnssec-keyfromlabel on given LDAP object.
        :returns: base file name of output files, e.g. Kaaa.test.+008+19719"""
-        self.log.info('attrs: %s', attrs)
+        logger.info('attrs: %s', attrs)
         assert attrs.get('idnsseckeyzone', ['FALSE'])[0] == 'TRUE', \
             'object %s is not a DNS zone key' % attrs['dn']
@@ -152,7 +158,7 @@ class BINDMgr(object):
         return escaped[:-1]
     def sync_zone(self, zone):
-        self.log.info('Synchronizing zone %s' % zone)
+        logger.info('Synchronizing zone %s', zone)
         zone_path = os.path.join(paths.BIND_LDAP_DNS_ZONE_WORKDIR,
                                  self.get_zone_dir_name(zone))
         try:
@@ -166,11 +172,11 @@ class BINDMgr(object):
         for prefix, dirs, files in os.walk(paths.DNSSEC_TOKENS_DIR, topdown=True):
             for name in dirs:
                 fpath = os.path.join(prefix, name)
-                self.log.debug('Fixing directory permissions: %s', fpath)
+                logger.debug('Fixing directory permissions: %s', fpath)
                 os.chmod(fpath, DIR_PERM | stat.S_ISGID)
             for name in files:
                 fpath = os.path.join(prefix, name)
-                self.log.debug('Fixing file permissions: %s', fpath)
+                logger.debug('Fixing file permissions: %s', fpath)
                 os.chmod(fpath, FILE_PERM)
         # TODO: move out
@@ -199,9 +205,9 @@ class BINDMgr(object):
         have old metadata objects and DNSSEC disabled. Such zones must be
         ignored to prevent errors while calling dnssec-keyfromlabel or rndc.
         """
-        self.log.debug('Key metadata in LDAP: %s' % self.ldap_keys)
-        self.log.debug('Zones modified but skipped during bindmgr.sync: %s',
-                       self.modified_zones - dnssec_zones)
+        logger.debug('Key metadata in LDAP: %s', self.ldap_keys)
+        logger.debug('Zones modified but skipped during bindmgr.sync: %s',
+                     self.modified_zones - dnssec_zones)
         for zone in self.modified_zones.intersection(dnssec_zones):
             self.sync_zone(zone)

View File

@@ -2,6 +2,8 @@
 # Copyright (C) 2014  FreeIPA Contributors see COPYING for license
 #
+import logging
 import ldap.dn
 import os
@@ -14,6 +16,8 @@ from ipaserver.dnssec.syncrepl import SyncReplConsumer
 from ipaserver.dnssec.odsmgr import ODSMgr
 from ipaserver.dnssec.bindmgr import BINDMgr
+logger = logging.getLogger(__name__)
 SIGNING_ATTR = 'idnsSecInlineSigning'
 OBJCLASS_ATTR = 'objectClass'
@@ -109,7 +113,8 @@ class KeySyncer(SyncReplConsumer):
             self.hsm_master_sync()
     def syncrepl_refreshdone(self):
-        self.log.info('Initial LDAP dump is done, sychronizing with ODS and BIND')
+        logger.info('Initial LDAP dump is done, sychronizing with ODS and '
+                    'BIND')
         self.init_done = True
         self.ods_sync()
         self.hsm_replica_sync()

View File

@@ -6,6 +6,7 @@ from __future__ import print_function
 from binascii import hexlify
 import collections
+import logging
 from pprint import pprint
 import ipalib
@@ -22,6 +23,9 @@ from ipaserver.dnssec.abshsm import (
 from ipaserver import p11helper as _ipap11helper
 import uuid
+logger = logging.getLogger(__name__)
 def uri_escape(val):
     """convert val to %-notation suitable for ID component in URI"""
     assert len(val) > 0, "zero-length URI component detected"
@@ -119,7 +123,7 @@ class Key(collections.MutableMapping):
         self._delentry = None  # indicates that object was deleted
         self.ldap = ldap
         self.ldapkeydb = ldapkeydb
-        self.log = ldap.log.getChild(__name__)
+        self.log = logger.getChild(__name__)
     def __assert_not_deleted(self):
         assert self.entry and not self._delentry, (

View File

@@ -3,6 +3,8 @@
 # Copyright (C) 2014  FreeIPA Contributors see COPYING for license
 #
+import logging
 import dns.name
 try:
     from xml.etree import cElementTree as etree
@@ -11,6 +13,8 @@ except ImportError:
 from ipapython import ipa_log_manager, ipautil
+logger = logging.getLogger(__name__)
 # hack: zone object UUID is stored as path to imaginary zone file
 ENTRYUUID_PREFIX = "/var/lib/ipa/dns/zone/entryUUID/"
 ENTRYUUID_PREFIX_LEN = len(ENTRYUUID_PREFIX)
@@ -21,7 +25,6 @@ class ZoneListReader(object):
         self.names = set()  # dns.name
         self.uuids = set()  # UUID strings
         self.mapping = dict()  # {UUID: dns.name}
-        self.log = ipa_log_manager.log_mgr.get_logger(self)
     def _add_zone(self, name, zid):
         """Add zone & UUID to internal structures.
@@ -125,7 +128,6 @@ class ODSMgr(object):
     has to be solved seperatelly.
     """
     def __init__(self):
-        self.log = ipa_log_manager.log_mgr.get_logger(self)
         self.zl_ldap = LDAPZoneListReader()
     def ksmutil(self, params):
@@ -146,7 +148,7 @@ class ODSMgr(object):
         zone_path = '%s%s' % (ENTRYUUID_PREFIX, uuid)
         cmd = ['zone', 'add', '--zone', str(name), '--input', zone_path]
         output = self.ksmutil(cmd)
-        self.log.info(output)
+        logger.info('%s', output)
         self.notify_enforcer()
     def del_ods_zone(self, name):
@@ -157,19 +159,19 @@ class ODSMgr(object):
             name = dns.name.root
         cmd = ['zone', 'delete', '--zone', str(name)]
         output = self.ksmutil(cmd)
-        self.log.info(output)
+        logger.info('%s', output)
         self.notify_enforcer()
         self.cleanup_signer(name)
     def notify_enforcer(self):
         cmd = ['notify']
         output = self.ksmutil(cmd)
-        self.log.info(output)
+        logger.info('%s', output)
     def cleanup_signer(self, zone_name):
         cmd = ['ods-signer', 'ldap-cleanup', str(zone_name)]
         output = ipautil.run(cmd, capture_output=True)
-        self.log.info(output)
+        logger.info('%s', output)
     def ldap_event(self, op, uuid, attrs):
         """Record single LDAP event - zone addition or deletion.
@@ -178,16 +180,16 @@ class ODSMgr(object):
         self.sync() have to be called to synchronize change to ODS."""
         assert op == 'add' or op == 'del'
         self.zl_ldap.process_ipa_zone(op, uuid, attrs)
-        self.log.debug("LDAP zones: %s", self.zl_ldap.mapping)
+        logger.debug("LDAP zones: %s", self.zl_ldap.mapping)
     def sync(self):
         """Synchronize list of zones in LDAP with ODS."""
         zl_ods = self.get_ods_zonelist()
-        self.log.debug("ODS zones: %s", zl_ods.mapping)
+        logger.debug("ODS zones: %s", zl_ods.mapping)
         removed = self.diff_zl(zl_ods, self.zl_ldap)
-        self.log.info("Zones removed from LDAP: %s", removed)
+        logger.info("Zones removed from LDAP: %s", removed)
         added = self.diff_zl(self.zl_ldap, zl_ods)
-        self.log.info("Zones added to LDAP: %s", added)
+        logger.info("Zones added to LDAP: %s", added)
         for (uuid, name) in removed:
             self.del_ods_zone(name)
         for (uuid, name) in added:

View File

@@ -7,6 +7,8 @@ This script implements a syncrepl consumer which syncs data from server
 to a local dict.
 """
+import logging
 # Import the python-ldap modules
 import ldap
 # Import specific classes from python-ldap
@@ -14,7 +16,7 @@ from ldap.cidict import cidict
 from ldap.ldapobject import ReconnectLDAPObject
 from ldap.syncrepl import SyncreplConsumer
-from ipapython import ipa_log_manager
+logger = logging.getLogger(__name__)
 class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
@@ -23,7 +25,6 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
     """
     def __init__(self, *args, **kwargs):
-        self.log = ipa_log_manager.log_mgr.get_logger(self)
         # Initialise the LDAP Connection first
         ldap.ldapobject.ReconnectLDAPObject.__init__(self, *args, **kwargs)
         # Now prepare the data store
@@ -39,13 +40,13 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
     def syncrepl_get_cookie(self):
         if 'cookie' in self.__data:
             cookie = self.__data['cookie']
-            self.log.debug('Current cookie is: %s', cookie)
+            logger.debug('Current cookie is: %s', cookie)
             return cookie
         else:
-            self.log.debug('Current cookie is: None (not received yet)')
+            logger.debug('Current cookie is: None (not received yet)')
     def syncrepl_set_cookie(self, cookie):
-        self.log.debug('New cookie is: %s', cookie)
+        logger.debug('New cookie is: %s', cookie)
         self.__data['cookie'] = cookie
     def syncrepl_entry(self, dn, attributes, uuid):
@@ -63,7 +64,7 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
             attributes['dn'] = dn
             self.__data['uuids'][uuid] = attributes
             # Debugging
-            self.log.debug('Detected %s of entry: %s %s', change_type, dn, uuid)
+            logger.debug('Detected %s of entry: %s %s', change_type, dn, uuid)
             if change_type == 'modify':
                 self.application_sync(uuid, dn, attributes, previous_attributes)
             else:
@@ -76,7 +77,7 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
         for uuid in uuids:
             attributes = self.__data['uuids'][uuid]
             dn = attributes['dn']
-            self.log.debug('Detected deletion of entry: %s %s', dn, uuid)
+            logger.debug('Detected deletion of entry: %s %s', dn, uuid)
             self.application_del(uuid, dn, attributes)
             del self.__data['uuids'][uuid]
@@ -99,17 +100,17 @@ class SyncReplConsumer(ReconnectLDAPObject, SyncreplConsumer):
             self.__presentUUIDs[uuid] = True
     def application_add(self, uuid, dn, attributes):
-        self.log.info('Performing application add for: %s %s', dn, uuid)
-        self.log.debug('New attributes: %s', attributes)
+        logger.info('Performing application add for: %s %s', dn, uuid)
+        logger.debug('New attributes: %s', attributes)
         return True
     def application_sync(self, uuid, dn, attributes, previous_attributes):
-        self.log.info('Performing application sync for: %s %s', dn, uuid)
-        self.log.debug('Old attributes: %s', previous_attributes)
-        self.log.debug('New attributes: %s', attributes)
+        logger.info('Performing application sync for: %s %s', dn, uuid)
+        logger.debug('Old attributes: %s', previous_attributes)
+        logger.debug('New attributes: %s', attributes)
         return True
     def application_del(self, uuid, dn, previous_attributes):
-        self.log.info('Performing application delete for: %s %s', dn, uuid)
-        self.log.debug('Old attributes: %s', previous_attributes)
+        logger.info('Performing application delete for: %s %s', dn, uuid)
+        logger.debug('Old attributes: %s', previous_attributes)
         return True

View File

@@ -22,6 +22,8 @@
from __future__ import print_function from __future__ import print_function
import base64 import base64
import logging
import dbus import dbus
import ldap import ldap
import os import os
@@ -54,8 +56,7 @@ from ipapython import ipautil
from ipapython import ipaldap from ipapython import ipaldap
from ipapython.certdb import get_ca_nickname from ipapython.certdb import get_ca_nickname
from ipapython.dn import DN from ipapython.dn import DN
from ipapython.ipa_log_manager import log_mgr,\ from ipapython.ipa_log_manager import standard_logging_setup, root_logger
standard_logging_setup, root_logger
from ipaserver.secrets.kem import IPAKEMKeys from ipaserver.secrets.kem import IPAKEMKeys
from ipaserver.install import certs from ipaserver.install import certs
@@ -68,6 +69,8 @@ from ipaserver.install import sysupgrade
from ipaserver.install.dogtaginstance import DogtagInstance from ipaserver.install.dogtaginstance import DogtagInstance
from ipaserver.plugins import ldap2 from ipaserver.plugins import ldap2
logger = logging.getLogger(__name__)
# We need to reset the template because the CA uses the regular boot # We need to reset the template because the CA uses the regular boot
# information # information
INF_TEMPLATE = """ INF_TEMPLATE = """
@@ -306,7 +309,6 @@ class CAInstance(DogtagInstance):
self.canickname = None self.canickname = None
self.ra_cert = None self.ra_cert = None
self.requestId = None self.requestId = None
self.log = log_mgr.get_logger(self)
self.no_db_setup = False self.no_db_setup = False
self.keytab = os.path.join( self.keytab = os.path.join(
paths.PKI_TOMCAT, self.service_prefix + '.keytab') paths.PKI_TOMCAT, self.service_prefix + '.keytab')
@@ -625,7 +627,7 @@ class CAInstance(DogtagInstance):
shutil.move(paths.CA_BACKUP_KEYS_P12, shutil.move(paths.CA_BACKUP_KEYS_P12,
paths.CACERT_P12) paths.CACERT_P12)
self.log.debug("completed creating ca instance") logger.debug("completed creating ca instance")
def backup_config(self): def backup_config(self):
try: try:
@@ -982,22 +984,22 @@ class CAInstance(DogtagInstance):
cmonger.stop() cmonger.stop()
# remove CRL files # remove CRL files
self.log.info("Remove old CRL files") logger.info("Remove old CRL files")
try: try:
for f in get_crl_files(): for f in get_crl_files():
self.log.debug("Remove %s", f) logger.debug("Remove %s", f)
installutils.remove_file(f) installutils.remove_file(f)
except OSError as e: except OSError as e:
self.log.warning("Error while removing old CRL files: %s", e) logger.warning("Error while removing old CRL files: %s", e)
# remove CRL directory # remove CRL directory
self.log.info("Remove CRL directory") logger.info("Remove CRL directory")
if os.path.exists(paths.PKI_CA_PUBLISH_DIR): if os.path.exists(paths.PKI_CA_PUBLISH_DIR):
try: try:
shutil.rmtree(paths.PKI_CA_PUBLISH_DIR) shutil.rmtree(paths.PKI_CA_PUBLISH_DIR)
except OSError as e: except OSError as e:
self.log.warning("Error while removing CRL publish " logger.warning("Error while removing CRL publish "
"directory: %s", e) "directory: %s", e)
def unconfigure_certmonger_renewal_guard(self): def unconfigure_certmonger_renewal_guard(self):
if not self.is_configured(): if not self.is_configured():
@@ -1026,7 +1028,7 @@ class CAInstance(DogtagInstance):
post_command='renew_ra_cert', post_command='renew_ra_cert',
storage='FILE') storage='FILE')
except RuntimeError as e: except RuntimeError as e:
self.log.error( logger.error(
"certmonger failed to start tracking certificate: %s", e) "certmonger failed to start tracking certificate: %s", e)
def stop_tracking_certificates(self): def stop_tracking_certificates(self):
@@ -1061,7 +1063,7 @@ class CAInstance(DogtagInstance):
'policyset.caLogSigningSet.2.default.params.range', 'policyset.caLogSigningSet.2.default.params.range',
separator='=' separator='='
) )
self.log.debug( logger.debug(
'caSignedLogCert.cfg profile validity range is %s', cert_range) 'caSignedLogCert.cfg profile validity range is %s', cert_range)
if cert_range == "180": if cert_range == "180":
installutils.set_directive( installutils.set_directive(
@@ -1078,7 +1080,7 @@ class CAInstance(DogtagInstance):
quotes=False, quotes=False,
separator='=' separator='='
) )
self.log.debug( logger.debug(
'updated caSignedLogCert.cfg profile validity range to 720') 'updated caSignedLogCert.cfg profile validity range to 720')
return True return True
return False return False
@@ -1264,7 +1266,7 @@ class CAInstance(DogtagInstance):
filter='(objectclass=ipaca)', filter='(objectclass=ipaca)',
attrs_list=['cn', 'ipacaid'], attrs_list=['cn', 'ipacaid'],
) )
add_lightweight_ca_tracking_requests(self.log, lwcas) add_lightweight_ca_tracking_requests(lwcas)
except errors.NotFound: except errors.NotFound:
# shouldn't happen, but don't fail if it does # shouldn't happen, but don't fail if it does
root_logger.warning( root_logger.warning(
@@ -1791,7 +1793,7 @@ def ensure_default_caacl():
certprofile=(u'caIPAserviceCert',)) certprofile=(u'caIPAserviceCert',))
def add_lightweight_ca_tracking_requests(logger, lwcas): def add_lightweight_ca_tracking_requests(lwcas):
"""Add tracking requests for the given lightweight CAs. """Add tracking requests for the given lightweight CAs.
The entries must have the 'cn' and 'ipacaid' attributes. The entries must have the 'cn' and 'ipacaid' attributes.
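The hunks above drop the explicit `logger` argument from add_lightweight_ca_tracking_requests in favour of the module-level logger, so callers pass only the CA entries. A minimal, self-contained sketch of that shape (the helper body and messages are illustrative, not the FreeIPA implementation):

    import logging

    logger = logging.getLogger(__name__)


    # Previously the caller had to hand its own logger in:
    #     add_tracking_requests(self.log, lwcas)
    # With a module-level logger the extra parameter disappears:
    def add_tracking_requests(entries):
        """Illustrative helper, not the FreeIPA implementation."""
        for entry in entries:
            logger.debug("tracking lightweight CA %s", entry)


    if __name__ == "__main__":
        logging.basicConfig(level=logging.DEBUG)
        add_tracking_requests(["ca-1", "ca-2"])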
View File
@@ -18,6 +18,8 @@
# #
import base64 import base64
import logging
import ldap import ldap
import os import os
import shutil import shutil
@@ -39,7 +41,8 @@ from ipaserver.install import service
from ipaserver.install import installutils from ipaserver.install import installutils
from ipaserver.install import replication from ipaserver.install import replication
from ipaserver.install.installutils import stopped_service from ipaserver.install.installutils import stopped_service
from ipapython.ipa_log_manager import log_mgr
logger = logging.getLogger(__name__)
def get_security_domain(): def get_security_domain():
@@ -115,8 +118,6 @@ class DogtagInstance(service.Service):
self.subject_base = None self.subject_base = None
self.nss_db = nss_db self.nss_db = nss_db
self.log = log_mgr.get_logger(self)
def is_installed(self): def is_installed(self):
""" """
Determine if subsystem instance has been installed. Determine if subsystem instance has been installed.
@@ -138,7 +139,7 @@ class DogtagInstance(service.Service):
"-f", cfg_file] "-f", cfg_file]
with open(cfg_file) as f: with open(cfg_file) as f:
self.log.debug( logger.debug(
'Contents of pkispawn configuration file (%s):\n%s', 'Contents of pkispawn configuration file (%s):\n%s',
cfg_file, ipautil.nolog_replace(f.read(), nolog_list)) cfg_file, ipautil.nolog_replace(f.read(), nolog_list))
@@ -165,8 +166,8 @@ class DogtagInstance(service.Service):
try: try:
self.stop('pki-tomcat') self.stop('pki-tomcat')
except Exception: except Exception:
self.log.debug(traceback.format_exc()) logger.debug("%s", traceback.format_exc())
self.log.critical( logger.critical(
"Failed to stop the Dogtag instance." "Failed to stop the Dogtag instance."
"See the installation log for details.") "See the installation log for details.")
@@ -223,8 +224,8 @@ class DogtagInstance(service.Service):
"-i", 'pki-tomcat', "-i", 'pki-tomcat',
"-s", self.subsystem]) "-s", self.subsystem])
except ipautil.CalledProcessError as e: except ipautil.CalledProcessError as e:
self.log.critical("failed to uninstall %s instance %s", logger.critical("failed to uninstall %s instance %s",
self.subsystem, e) self.subsystem, e)
def http_proxy(self): def http_proxy(self):
""" Update the http proxy file """ """ Update the http proxy file """
@@ -267,7 +268,7 @@ class DogtagInstance(service.Service):
try: try:
return certmonger.get_pin('internal') return certmonger.get_pin('internal')
except IOError as e: except IOError as e:
self.log.debug( logger.debug(
'Unable to determine PIN for the Dogtag instance: %s', e) 'Unable to determine PIN for the Dogtag instance: %s', e)
raise RuntimeError(e) raise RuntimeError(e)
@@ -286,7 +287,7 @@ class DogtagInstance(service.Service):
post_command='renew_ca_cert "%s"' % nickname, post_command='renew_ca_cert "%s"' % nickname,
) )
except RuntimeError as e: except RuntimeError as e:
self.log.error( logger.error(
"certmonger failed to start tracking certificate: %s", e) "certmonger failed to start tracking certificate: %s", e)
def track_servercert(self): def track_servercert(self):
@@ -305,8 +306,8 @@ class DogtagInstance(service.Service):
pre_command='stop_pkicad', pre_command='stop_pkicad',
post_command='renew_ca_cert "%s"' % self.server_cert_name) post_command='renew_ca_cert "%s"' % self.server_cert_name)
except RuntimeError as e: except RuntimeError as e:
self.log.error( logger.error(
"certmonger failed to start tracking certificate: %s" % e) "certmonger failed to start tracking certificate: %s", e)
def stop_tracking_certificates(self, stop_certmonger=True): def stop_tracking_certificates(self, stop_certmonger=True):
"""Stop tracking our certificates. Called on uninstall. """Stop tracking our certificates. Called on uninstall.
@@ -328,7 +329,7 @@ class DogtagInstance(service.Service):
certmonger.stop_tracking( certmonger.stop_tracking(
self.nss_db, nickname=nickname) self.nss_db, nickname=nickname)
except RuntimeError as e: except RuntimeError as e:
self.log.error( logger.error(
"certmonger failed to stop tracking certificate: %s", e) "certmonger failed to stop tracking certificate: %s", e)
if stop_certmonger: if stop_certmonger:
@@ -358,7 +359,7 @@ class DogtagInstance(service.Service):
Get the certificate for the admin user by checking the ldap entry Get the certificate for the admin user by checking the ldap entry
for the user. There should be only one certificate per user. for the user. There should be only one certificate per user.
""" """
self.log.debug('Trying to find the certificate for the admin user') logger.debug('Trying to find the certificate for the admin user')
conn = None conn = None
try: try:
@@ -377,11 +378,11 @@ class DogtagInstance(service.Service):
return base64.b64encode(admin_cert) return base64.b64encode(admin_cert)
def handle_setup_error(self, e): def handle_setup_error(self, e):
self.log.critical("Failed to configure %s instance: %s" logger.critical("Failed to configure %s instance: %s",
% (self.subsystem, e)) self.subsystem, e)
self.log.critical("See the installation logs and the following " logger.critical("See the installation logs and the following "
"files/directories for more information:") "files/directories for more information:")
self.log.critical(" %s" % paths.TOMCAT_TOPLEVEL_DIR) logger.critical(" %s", paths.TOMCAT_TOPLEVEL_DIR)
raise RuntimeError("%s configuration failed." % self.subsystem) raise RuntimeError("%s configuration failed." % self.subsystem)
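The dogtaginstance hunks above replace the per-instance `self.log = log_mgr.get_logger(self)` attribute with a single module-level logger and hand arguments to the logger lazily instead of %-formatting them up front. A reduced stand-alone sketch of that pattern (the class and messages are invented for illustration):

    import logging

    # One logger per module, keyed by the module's import path; this replaces
    # the per-instance attribute created by log_mgr.get_logger(self).
    logger = logging.getLogger(__name__)


    class InstanceSketch(object):
        """Invented class, not DogtagInstance; it only mirrors the logging shape."""

        def __init__(self, subsystem):
            self.subsystem = subsystem  # no self.log attribute any more

        def handle_setup_error(self, exc):
            # Arguments are handed to the logger instead of %-formatting the
            # message up front, so interpolation happens only when the record
            # is actually emitted.
            logger.critical("Failed to configure %s instance: %s",
                            self.subsystem, exc)
            raise RuntimeError("%s configuration failed." % self.subsystem)


    if __name__ == "__main__":
        logging.basicConfig(level=logging.DEBUG)
        try:
            InstanceSketch("CA").handle_setup_error(ValueError("boom"))
        except RuntimeError as exc:
            logger.debug("caught: %s", exc)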
View File
@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
# #
import logging
import os import os
import shutil import shutil
import tempfile import tempfile
@@ -48,6 +49,8 @@ from ipaplatform.tasks import tasks
ISO8601_DATETIME_FMT = '%Y-%m-%dT%H:%M:%S' ISO8601_DATETIME_FMT = '%Y-%m-%dT%H:%M:%S'
logger = logging.getLogger(__name__)
""" """
A test gpg can be generated like this: A test gpg can be generated like this:
@@ -274,7 +277,7 @@ class Backup(admintool.AdminTool):
api.bootstrap(in_server=True, context='backup', confdir=paths.ETC_IPA) api.bootstrap(in_server=True, context='backup', confdir=paths.ETC_IPA)
api.finalize() api.finalize()
self.log.info("Preparing backup on %s", api.env.host) logger.info("Preparing backup on %s", api.env.host)
pent = pwd.getpwnam(constants.DS_USER) pent = pwd.getpwnam(constants.DS_USER)
@@ -302,10 +305,10 @@ class Backup(admintool.AdminTool):
self.create_header(options.data_only) self.create_header(options.data_only)
if options.data_only: if options.data_only:
if not options.online: if not options.online:
self.log.info('Stopping Directory Server') logger.info('Stopping Directory Server')
dirsrv.stop(capture_output=False) dirsrv.stop(capture_output=False)
else: else:
self.log.info('Stopping IPA services') logger.info('Stopping IPA services')
run(['ipactl', 'stop']) run(['ipactl', 'stop'])
instance = installutils.realm_to_serverid(api.env.realm) instance = installutils.realm_to_serverid(api.env.realm)
@@ -325,17 +328,17 @@ class Backup(admintool.AdminTool):
if options.data_only: if options.data_only:
if not options.online: if not options.online:
self.log.info('Starting Directory Server') logger.info('Starting Directory Server')
dirsrv.start(capture_output=False) dirsrv.start(capture_output=False)
else: else:
self.log.info('Starting IPA service') logger.info('Starting IPA service')
run(['ipactl', 'start']) run(['ipactl', 'start'])
finally: finally:
try: try:
os.chdir(cwd) os.chdir(cwd)
except Exception as e: except Exception as e:
self.log.error('Cannot change directory to %s: %s' % (cwd, e)) logger.error('Cannot change directory to %s: %s', cwd, e)
shutil.rmtree(self.top_dir) shutil.rmtree(self.top_dir)
@@ -376,8 +379,8 @@ class Backup(admintool.AdminTool):
try: try:
self._conn.external_bind() self._conn.external_bind()
except Exception as e: except Exception as e:
self.log.error("Unable to bind to LDAP server %s: %s" % logger.error("Unable to bind to LDAP server %s: %s",
(self._conn.host, e)) self._conn.host, e)
return self._conn return self._conn
@@ -391,7 +394,7 @@ class Backup(admintool.AdminTool):
For SELinux reasons this writes out to the 389-ds backup location For SELinux reasons this writes out to the 389-ds backup location
and we move it. and we move it.
''' '''
self.log.info('Backing up %s in %s to LDIF' % (backend, instance)) logger.info('Backing up %s in %s to LDIF', backend, instance)
cn = time.strftime('export_%Y_%m_%d_%H_%M_%S') cn = time.strftime('export_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'export'), ('cn', 'tasks'), ('cn', 'config')) dn = DN(('cn', cn), ('cn', 'export'), ('cn', 'tasks'), ('cn', 'config'))
@@ -421,7 +424,7 @@ class Backup(admintool.AdminTool):
raise admintool.ScriptError('Unable to add LDIF task: %s' raise admintool.ScriptError('Unable to add LDIF task: %s'
% e) % e)
self.log.info("Waiting for LDIF to finish") logger.info("Waiting for LDIF to finish")
wait_for_task(conn, dn) wait_for_task(conn, dn)
else: else:
args = [paths.DB2LDIF, args = [paths.DB2LDIF,
@@ -431,7 +434,7 @@ class Backup(admintool.AdminTool):
'-a', ldiffile] '-a', ldiffile]
result = run(args, raiseonerr=False) result = run(args, raiseonerr=False)
if result.returncode != 0: if result.returncode != 0:
self.log.critical('db2ldif failed: %s', result.error_log) logger.critical('db2ldif failed: %s', result.error_log)
# Move the LDIF backup to our location # Move the LDIF backup to our location
shutil.move(ldiffile, os.path.join(self.dir, ldifname)) shutil.move(ldiffile, os.path.join(self.dir, ldifname))
@@ -443,7 +446,7 @@ class Backup(admintool.AdminTool):
If executed online create a task and wait for it to complete. If executed online create a task and wait for it to complete.
''' '''
self.log.info('Backing up %s' % instance) logger.info('Backing up %s', instance)
cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S') cn = time.strftime('backup_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config')) dn = DN(('cn', cn), ('cn', 'backup'), ('cn', 'tasks'), ('cn', 'config'))
@@ -468,13 +471,13 @@ class Backup(admintool.AdminTool):
raise admintool.ScriptError('Unable to to add backup task: %s' raise admintool.ScriptError('Unable to to add backup task: %s'
% e) % e)
self.log.info("Waiting for BAK to finish") logger.info("Waiting for BAK to finish")
wait_for_task(conn, dn) wait_for_task(conn, dn)
else: else:
args = [paths.DB2BAK, bakdir, '-Z', instance] args = [paths.DB2BAK, bakdir, '-Z', instance]
result = run(args, raiseonerr=False) result = run(args, raiseonerr=False)
if result.returncode != 0: if result.returncode != 0:
self.log.critical('db2bak failed: %s', result.error_log) logger.critical('db2bak failed: %s', result.error_log)
shutil.move(bakdir, self.dir) shutil.move(bakdir, self.dir)
@@ -486,7 +489,7 @@ class Backup(admintool.AdminTool):
tarfile = os.path.join(self.dir, 'files.tar') tarfile = os.path.join(self.dir, 'files.tar')
self.log.info("Backing up files") logger.info("Backing up files")
args = ['tar', args = ['tar',
'--exclude=/var/lib/ipa/backup', '--exclude=/var/lib/ipa/backup',
'--xattrs', '--xattrs',
@@ -564,11 +567,11 @@ class Backup(admintool.AdminTool):
conn = self.get_connection() conn = self.get_connection()
services = conn.get_entries(dn, conn.SCOPE_ONELEVEL) services = conn.get_entries(dn, conn.SCOPE_ONELEVEL)
except errors.NetworkError: except errors.NetworkError:
self.log.critical( logger.critical(
"Unable to obtain list of master services, continuing anyway") "Unable to obtain list of master services, continuing anyway")
except Exception as e: except Exception as e:
self.log.error("Failed to read services from '%s': %s" % logger.error("Failed to read services from '%s': %s",
(conn.host, e)) conn.host, e)
else: else:
services_cns = [s.single_value['cn'] for s in services] services_cns = [s.single_value['cn'] for s in services]
@@ -615,9 +618,9 @@ class Backup(admintool.AdminTool):
(result.returncode, result.error_log)) (result.returncode, result.error_log))
if encrypt: if encrypt:
self.log.info('Encrypting %s' % filename) logger.info('Encrypting %s', filename)
filename = encrypt_file(filename, keyring) filename = encrypt_file(filename, keyring)
shutil.move(self.header, backup_dir) shutil.move(self.header, backup_dir)
self.log.info('Backed up to %s', backup_dir) logger.info('Backed up to %s', backup_dir)
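Most of the backup hunks above only change `log('... %s' % value)` into `log('... %s', value)`. A small sketch of the difference, using a throwaway logger name:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("lazy-formatting-demo")  # illustrative name

    backup_dir = "/var/lib/ipa/backup/ipa-full-2017-01-01"  # example path

    # Eager: the message string is built with % before the logger ever sees
    # it, even when the INFO level is disabled for this logger.
    logger.info("Backed up to %s" % backup_dir)

    # Lazy: the format string and argument are stored on the log record and
    # merged only when a handler actually emits the record.
    logger.info("Backed up to %s", backup_dir)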
View File
@@ -19,6 +19,7 @@
from __future__ import print_function from __future__ import print_function
import logging
import os import os
from optparse import OptionGroup # pylint: disable=deprecated-module from optparse import OptionGroup # pylint: disable=deprecated-module
from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives import serialization
@@ -35,6 +36,8 @@ from ipaplatform.paths import paths
from ipalib import api, errors, x509 from ipalib import api, errors, x509
from ipaserver.install import certs, cainstance, installutils from ipaserver.install import certs, cainstance, installutils
logger = logging.getLogger(__name__)
class CACertManage(admintool.AdminTool): class CACertManage(admintool.AdminTool):
command_name = 'ipa-cacert-manage' command_name = 'ipa-cacert-manage'
@@ -154,7 +157,7 @@ class CACertManage(admintool.AdminTool):
if self.request_id is None: if self.request_id is None:
raise admintool.ScriptError( raise admintool.ScriptError(
"CA certificate is not tracked by certmonger") "CA certificate is not tracked by certmonger")
self.log.debug( logger.debug(
"Found certmonger request id %r", self.request_id) "Found certmonger request id %r", self.request_id)
db = certs.CertDB(api.env.realm, nssdir=paths.PKI_TOMCAT_ALIAS_DIR) db = certs.CertDB(api.env.realm, nssdir=paths.PKI_TOMCAT_ALIAS_DIR)
@@ -309,7 +312,7 @@ class CACertManage(admintool.AdminTool):
def resubmit_request(self, ca='dogtag-ipa-ca-renew-agent', profile=''): def resubmit_request(self, ca='dogtag-ipa-ca-renew-agent', profile=''):
timeout = api.env.startup_timeout + 60 timeout = api.env.startup_timeout + 60
self.log.debug("resubmitting certmonger request '%s'", self.request_id) logger.debug("resubmitting certmonger request '%s'", self.request_id)
certmonger.resubmit_request(self.request_id, ca=ca, profile=profile) certmonger.resubmit_request(self.request_id, ca=ca, profile=profile)
try: try:
state = certmonger.wait_for_request(self.request_id, timeout) state = certmonger.wait_for_request(self.request_id, timeout)
@@ -323,7 +326,7 @@ class CACertManage(admintool.AdminTool):
"Error resubmitting certmonger request '%s', " "Error resubmitting certmonger request '%s', "
"please check the request manually" % self.request_id) "please check the request manually" % self.request_id)
self.log.debug("modifying certmonger request '%s'", self.request_id) logger.debug("modifying certmonger request '%s'", self.request_id)
certmonger.modify(self.request_id, certmonger.modify(self.request_id,
ca='dogtag-ipa-ca-renew-agent', ca='dogtag-ipa-ca-renew-agent',
profile='') profile='')
View File
@@ -20,6 +20,7 @@
from __future__ import print_function from __future__ import print_function
import logging
import sys import sys
import tempfile import tempfile
from optparse import SUPPRESS_HELP # pylint: disable=deprecated-module from optparse import SUPPRESS_HELP # pylint: disable=deprecated-module
@@ -40,6 +41,8 @@ from ipaserver.install import dogtaginstance
from ipaserver.install import kra from ipaserver.install import kra
from ipaserver.install.installutils import ReplicaConfig from ipaserver.install.installutils import ReplicaConfig
logger = logging.getLogger(__name__)
class KRAInstall(admintool.AdminTool): class KRAInstall(admintool.AdminTool):
@@ -214,7 +217,7 @@ class KRAInstaller(KRAInstall):
try: try:
kra.install(api, config, self.options) kra.install(api, config, self.options)
except: except:
self.log.error(dedent(self.FAIL_MESSAGE)) logger.error('%s', dedent(self.FAIL_MESSAGE))
raise raise
api.Backend.ldap2.disconnect() api.Backend.ldap2.disconnect()
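`logger.error('%s', dedent(self.FAIL_MESSAGE))` passes the prebuilt text as data to a constant '%s' format string. A short sketch with a stand-in message (the text below is not the real FAIL_MESSAGE):

    import logging
    from textwrap import dedent

    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger("fail-message-demo")  # illustrative name

    FAIL_MESSAGE = dedent("""\
        Something went wrong (100% of the work may need to be redone).
        See the installation log for details.
    """)  # stand-in text, not the real FAIL_MESSAGE

    # The prebuilt block is passed as an argument to a constant '%s' format
    # string, so the logger treats it purely as data: any '%' characters in
    # the text are never interpreted as format directives, and the format
    # string itself stays a literal.
    logger.error("%s", FAIL_MESSAGE)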
View File
@@ -25,6 +25,7 @@
from __future__ import print_function from __future__ import print_function
import logging
import os import os
import six import six
@@ -39,6 +40,8 @@ from ipaserver.install.upgradeinstance import IPAUpgrade
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
class LDAPUpdater(admintool.AdminTool): class LDAPUpdater(admintool.AdminTool):
command_name = 'ipa-ldap-updater' command_name = 'ipa-ldap-updater'
@@ -69,8 +72,8 @@ class LDAPUpdater(admintool.AdminTool):
self.files = self.args self.files = self.args
if not (self.files or options.schema_files): if not (self.files or options.schema_files):
self.log.info("To execute overall IPA upgrade please use " logger.info("To execute overall IPA upgrade please use "
"'ipa-server-upgrade' command") "'ipa-server-upgrade' command")
raise admintool.ScriptError("No update files or schema file were " raise admintool.ScriptError("No update files or schema file were "
"specified") "specified")
@@ -117,9 +120,9 @@ class LDAPUpdater_Upgrade(LDAPUpdater):
raise admintool.ScriptError('IPA upgrade failed.', 1) raise admintool.ScriptError('IPA upgrade failed.', 1)
else: else:
if upgrade.modified: if upgrade.modified:
self.log.info('Update complete') logger.info('Update complete')
else: else:
self.log.info('Update complete, no data were modified') logger.info('Update complete, no data were modified')
api.Backend.ldap2.disconnect() api.Backend.ldap2.disconnect()
@@ -149,8 +152,8 @@ class LDAPUpdater_NonUpgrade(LDAPUpdater):
modified = ld.update(self.files) or modified modified = ld.update(self.files) or modified
if modified: if modified:
self.log.info('Update complete') logger.info('Update complete')
else: else:
self.log.info('Update complete, no data were modified') logger.info('Update complete, no data were modified')
api.Backend.ldap2.disconnect() api.Backend.ldap2.disconnect()
View File
@@ -20,6 +20,7 @@
import abc import abc
import base64 import base64
import datetime import datetime
import logging
import os import os
import uuid import uuid
@@ -45,6 +46,8 @@ if six.PY3:
unicode = str unicode = str
long = int long = int
logger = logging.getLogger(__name__)
class ValidationError(Exception): class ValidationError(Exception):
pass pass
@@ -538,9 +541,9 @@ class OTPTokenImport(admintool.AdminTool):
try: try:
api.Command.otptoken_add(keypkg.id, no_qrcode=True, **keypkg.options) api.Command.otptoken_add(keypkg.id, no_qrcode=True, **keypkg.options)
except Exception as e: except Exception as e:
self.log.warning("Error adding token: %s", e) logger.warning("Error adding token: %s", e)
else: else:
self.log.info("Added token: %s", keypkg.id) logger.info("Added token: %s", keypkg.id)
keypkg.remove() keypkg.remove()
finally: finally:
api.Backend.ldap2.disconnect() api.Backend.ldap2.disconnect()
View File
@@ -4,11 +4,15 @@
from __future__ import print_function from __future__ import print_function
import logging
from ipalib import api from ipalib import api
from ipaplatform.paths import paths from ipaplatform.paths import paths
from ipapython.admintool import AdminTool from ipapython.admintool import AdminTool
from ipaserver.install.krbinstance import KrbInstance, is_pkinit_enabled from ipaserver.install.krbinstance import KrbInstance, is_pkinit_enabled
logger = logging.getLogger(__name__)
class PKINITManage(AdminTool): class PKINITManage(AdminTool):
command_name = "ipa-pkinit-manage" command_name = "ipa-pkinit-manage"
@@ -64,7 +68,7 @@ class PKINITManage(AdminTool):
krb.stop_tracking_certs() krb.stop_tracking_certs()
except RuntimeError as e: except RuntimeError as e:
if ca_enabled: if ca_enabled:
self.log.warning( logger.warning(
"Failed to stop tracking certificates: %s", e) "Failed to stop tracking certificates: %s", e)
krb.enable_ssl() krb.enable_ssl()
@@ -76,9 +80,9 @@ class PKINITManage(AdminTool):
def enable(self): def enable(self):
if not api.Command.ca_is_enabled()['result']: if not api.Command.ca_is_enabled()['result']:
self.log.error("Cannot enable PKINIT in CA-less deployment") logger.error("Cannot enable PKINIT in CA-less deployment")
self.log.error("Use ipa-server-certinstall to install KDC " logger.error("Use ipa-server-certinstall to install KDC "
"certificate manually") "certificate manually")
raise RuntimeError("Cannot enable PKINIT in CA-less deployment") raise RuntimeError("Cannot enable PKINIT in CA-less deployment")
self._setup(True) self._setup(True)
View File
@@ -21,6 +21,7 @@
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import print_function from __future__ import print_function
import logging
import os import os
import shutil import shutil
import tempfile import tempfile
@@ -52,6 +53,8 @@ from ipalib import errors
from ipaplatform.paths import paths from ipaplatform.paths import paths
from ipalib.constants import DOMAIN_LEVEL_0 from ipalib.constants import DOMAIN_LEVEL_0
logger = logging.getLogger(__name__)
UNSUPPORTED_DOMAIN_LEVEL_TEMPLATE = """ UNSUPPORTED_DOMAIN_LEVEL_TEMPLATE = """
Replica creation using '{command_name}' to generate replica file Replica creation using '{command_name}' to generate replica file
is supported only in {domain_level}-level IPA domain. is supported only in {domain_level}-level IPA domain.
@@ -242,9 +245,9 @@ class ReplicaPrepare(admintool.AdminTool):
if isinstance(e, installutils.HostLookupError): if isinstance(e, installutils.HostLookupError):
if not options.ip_addresses: if not options.ip_addresses:
if dns_container_exists(api.env.basedn): if dns_container_exists(api.env.basedn):
self.log.info('You might use the --ip-address option ' logger.info('You might use the --ip-address option '
'to create a DNS entry if the DNS zone ' 'to create a DNS entry if the DNS zone '
'is managed by IPA.') 'is managed by IPA.')
raise raise
else: else:
# The host doesn't exist in DNS but we're adding it. # The host doesn't exist in DNS but we're adding it.
@@ -254,7 +257,7 @@ class ReplicaPrepare(admintool.AdminTool):
if options.ip_addresses: if options.ip_addresses:
if not dns_container_exists(api.env.basedn): if not dns_container_exists(api.env.basedn):
self.log.error( logger.error(
"It is not possible to add a DNS record automatically " "It is not possible to add a DNS record automatically "
"because DNS is not managed by IPA. Please create DNS " "because DNS is not managed by IPA. Please create DNS "
"record manually and then omit --ip-address option.") "record manually and then omit --ip-address option.")
@@ -266,9 +269,9 @@ class ReplicaPrepare(admintool.AdminTool):
_host, zone = self.replica_fqdn.split('.', 1) _host, zone = self.replica_fqdn.split('.', 1)
if not bindinstance.dns_zone_exists(zone, api=api): if not bindinstance.dns_zone_exists(zone, api=api):
self.log.error("DNS zone %s does not exist in IPA managed DNS " logger.error("DNS zone %s does not exist in IPA managed DNS "
"server. Either create DNS zone or omit " "server. Either create DNS zone or omit "
"--ip-address option." % zone) "--ip-address option.", zone)
raise admintool.ScriptError("Cannot add DNS record") raise admintool.ScriptError("Cannot add DNS record")
self.http_pin = self.dirsrv_pin = None self.http_pin = self.dirsrv_pin = None
@@ -312,8 +315,8 @@ class ReplicaPrepare(admintool.AdminTool):
options = self.options options = self.options
super(ReplicaPrepare, self).run() super(ReplicaPrepare, self).run()
self.log.info("Preparing replica for %s from %s", logger.info("Preparing replica for %s from %s",
self.replica_fqdn, api.env.host) self.replica_fqdn, api.env.host)
enable_replication_version_checking( enable_replication_version_checking(
api.env.realm, api.env.realm,
self.dirman_password) self.dirman_password)
@@ -352,7 +355,7 @@ class ReplicaPrepare(admintool.AdminTool):
fd.write("%s\n" % (self.dirsrv_pin or '')) fd.write("%s\n" % (self.dirsrv_pin or ''))
if options.dirsrv_cert_files: if options.dirsrv_cert_files:
self.log.info("Copying SSL certificate for the Directory Server") logger.info("Copying SSL certificate for the Directory Server")
self.copy_info_file(self.dirsrv_pkcs12_file.name, "dscert.p12") self.copy_info_file(self.dirsrv_pkcs12_file.name, "dscert.p12")
else: else:
if ipautil.file_exists(options.ca_file): if ipautil.file_exists(options.ca_file):
@@ -366,15 +369,15 @@ class ReplicaPrepare(admintool.AdminTool):
raise admintool.ScriptError("Root CA PKCS#12 not " raise admintool.ScriptError("Root CA PKCS#12 not "
"found in %s" % options.ca_file) "found in %s" % options.ca_file)
self.log.info( logger.info(
"Creating SSL certificate for the Directory Server") "Creating SSL certificate for the Directory Server")
self.export_certdb("dscert", passwd_fname) self.export_certdb("dscert", passwd_fname)
if not options.dirsrv_cert_files: if not options.dirsrv_cert_files:
self.log.info( logger.info(
"Creating SSL certificate for the dogtag Directory Server") "Creating SSL certificate for the dogtag Directory Server")
self.export_certdb("dogtagcert", passwd_fname) self.export_certdb("dogtagcert", passwd_fname)
self.log.info("Saving dogtag Directory Server port") logger.info("Saving dogtag Directory Server port")
port_fname = os.path.join( port_fname = os.path.join(
self.dir, "dogtag_directory_port.txt") self.dir, "dogtag_directory_port.txt")
with open(port_fname, "w") as fd: with open(port_fname, "w") as fd:
@@ -388,17 +391,17 @@ class ReplicaPrepare(admintool.AdminTool):
fd.write("%s\n" % (self.http_pin or '')) fd.write("%s\n" % (self.http_pin or ''))
if options.http_cert_files: if options.http_cert_files:
self.log.info("Copying SSL certificate for the Web Server") logger.info("Copying SSL certificate for the Web Server")
self.copy_info_file(self.http_pkcs12_file.name, "httpcert.p12") self.copy_info_file(self.http_pkcs12_file.name, "httpcert.p12")
else: else:
self.log.info("Creating SSL certificate for the Web Server") logger.info("Creating SSL certificate for the Web Server")
self.export_certdb("httpcert", passwd_fname) self.export_certdb("httpcert", passwd_fname)
self.log.info("Exporting RA certificate") logger.info("Exporting RA certificate")
self.export_ra_pkcs12() self.export_ra_pkcs12()
def copy_misc_files(self): def copy_misc_files(self):
self.log.info("Copying additional files") logger.info("Copying additional files")
cacert_filename = paths.CACERT_PEM cacert_filename = paths.CACERT_PEM
if ipautil.file_exists(cacert_filename): if ipautil.file_exists(cacert_filename):
@@ -406,13 +409,13 @@ class ReplicaPrepare(admintool.AdminTool):
self.copy_info_file(paths.IPA_DEFAULT_CONF, "default.conf") self.copy_info_file(paths.IPA_DEFAULT_CONF, "default.conf")
def retrieve_ca_certs(self): def retrieve_ca_certs(self):
self.log.info("Retrieving CA certificates") logger.info("Retrieving CA certificates")
dest = os.path.join(self.dir, "ca.crt") dest = os.path.join(self.dir, "ca.crt")
install_ca_cert(api.Backend.ldap2, api.env.basedn, install_ca_cert(api.Backend.ldap2, api.env.basedn,
api.env.realm, paths.IPA_CA_CRT, destfile=dest) api.env.realm, paths.IPA_CA_CRT, destfile=dest)
def save_config(self): def save_config(self):
self.log.info("Finalizing configuration") logger.info("Finalizing configuration")
config = SafeConfigParser() config = SafeConfigParser()
config.add_section("realm") config.add_section("realm")
@@ -430,7 +433,7 @@ class ReplicaPrepare(admintool.AdminTool):
replicafile = paths.REPLICA_INFO_TEMPLATE % self.replica_fqdn replicafile = paths.REPLICA_INFO_TEMPLATE % self.replica_fqdn
encfile = "%s.gpg" % replicafile encfile = "%s.gpg" % replicafile
self.log.info("Packaging replica information into %s", encfile) logger.info("Packaging replica information into %s", encfile)
ipautil.run( ipautil.run(
[paths.TAR, "cf", replicafile, "-C", self.top_dir, "realm_info"]) [paths.TAR, "cf", replicafile, "-C", self.top_dir, "realm_info"])
installutils.encrypt_file( installutils.encrypt_file(
@@ -443,11 +446,11 @@ class ReplicaPrepare(admintool.AdminTool):
def add_dns_records(self): def add_dns_records(self):
options = self.options options = self.options
self.log.info("Adding DNS records for %s", self.replica_fqdn) logger.info("Adding DNS records for %s", self.replica_fqdn)
name, domain = self.replica_fqdn.split(".", 1) name, domain = self.replica_fqdn.split(".", 1)
for reverse_zone in options.reverse_zones: for reverse_zone in options.reverse_zones:
self.log.info("Adding reverse zone %s", reverse_zone) logger.info("Adding reverse zone %s", reverse_zone)
add_zone(reverse_zone) add_zone(reverse_zone)
for ip in options.ip_addresses: for ip in options.ip_addresses:
@@ -461,7 +464,7 @@ class ReplicaPrepare(admintool.AdminTool):
if not options.no_reverse: if not options.no_reverse:
reverse_zone = bindinstance.find_reverse_zone(ip) reverse_zone = bindinstance.find_reverse_zone(ip)
if reverse_zone is None: if reverse_zone is None:
self.log.warning( logger.warning(
"Could not find any IPA managed reverse zone. " "Could not find any IPA managed reverse zone. "
"Not creating PTR records") "Not creating PTR records")
return return
@@ -486,8 +489,8 @@ class ReplicaPrepare(admintool.AdminTool):
except exceptions: except exceptions:
return False return False
except Exception as e: except Exception as e:
self.log.warning('Exception while waiting for DNS record: %s: %s', logger.warning('Exception while waiting for DNS record: %s: %s',
type(e).__name__, e) type(e).__name__, e)
return True return True
@@ -499,20 +502,20 @@ class ReplicaPrepare(admintool.AdminTool):
replica_fqdn += '.' replica_fqdn += '.'
if self.check_dns(replica_fqdn): if self.check_dns(replica_fqdn):
self.log.debug('%s A/AAAA record resolvable', replica_fqdn) logger.debug('%s A/AAAA record resolvable', replica_fqdn)
return return
self.log.info('Waiting for %s A or AAAA record to be resolvable', logger.info('Waiting for %s A or AAAA record to be resolvable',
replica_fqdn) replica_fqdn)
print('This can be safely interrupted (Ctrl+C)') print('This can be safely interrupted (Ctrl+C)')
try: try:
while not self.check_dns(replica_fqdn): while not self.check_dns(replica_fqdn):
time.sleep(1) time.sleep(1)
except KeyboardInterrupt: except KeyboardInterrupt:
self.log.info('Interrupted') logger.info('Interrupted')
else: else:
self.log.debug('%s A/AAAA record resolvable', replica_fqdn) logger.debug('%s A/AAAA record resolvable', replica_fqdn)
def copy_info_file(self, source, dest): def copy_info_file(self, source, dest):
"""Copy a file into the info directory """Copy a file into the info directory
@@ -521,7 +524,7 @@ class ReplicaPrepare(admintool.AdminTool):
:param dest: The destination file (relative to the info directory) :param dest: The destination file (relative to the info directory)
""" """
dest_path = os.path.join(self.dir, dest) dest_path = os.path.join(self.dir, dest)
self.log.debug('Copying %s to %s', source, dest_path) logger.debug('Copying %s to %s', source, dest_path)
try: try:
shutil.copy(source, dest_path) shutil.copy(source, dest_path)
except IOError as e: except IOError as e:
@@ -558,7 +561,7 @@ class ReplicaPrepare(admintool.AdminTool):
try: try:
db.export_pkcs12(pkcs12_fname, passwd_fname, nickname) db.export_pkcs12(pkcs12_fname, passwd_fname, nickname)
except ipautil.CalledProcessError as e: except ipautil.CalledProcessError as e:
self.log.info("error exporting Server certificate: %s", e) logger.info("error exporting Server certificate: %s", e)
installutils.remove_file(pkcs12_fname) installutils.remove_file(pkcs12_fname)
installutils.remove_file(passwd_fname) installutils.remove_file(passwd_fname)
@@ -619,7 +622,8 @@ class ReplicaPrepare(admintool.AdminTool):
domain_level = dsinstance.get_domain_level(api) domain_level = dsinstance.get_domain_level(api)
if domain_level > DOMAIN_LEVEL_0: if domain_level > DOMAIN_LEVEL_0:
self.log.error( logger.error(
'%s',
UNSUPPORTED_DOMAIN_LEVEL_TEMPLATE.format( UNSUPPORTED_DOMAIN_LEVEL_TEMPLATE.format(
command_name=self.command_name, command_name=self.command_name,
domain_level=DOMAIN_LEVEL_0, domain_level=DOMAIN_LEVEL_0,
View File
@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
# #
import logging
import os import os
import shutil import shutil
import tempfile import tempfile
@@ -57,6 +58,9 @@ try:
except ImportError: except ImportError:
adtrustinstance = None adtrustinstance = None
logger = logging.getLogger(__name__)
def recursive_chown(path, uid, gid): def recursive_chown(path, uid, gid):
''' '''
Change ownership of all files and directories in a path. Change ownership of all files and directories in a path.
@@ -102,7 +106,7 @@ def decrypt_file(tmpdir, filename, keyring):
class RemoveRUVParser(ldif.LDIFParser): class RemoveRUVParser(ldif.LDIFParser):
def __init__(self, input_file, writer, logger): def __init__(self, input_file, writer):
ldif.LDIFParser.__init__(self, input_file) ldif.LDIFParser.__init__(self, input_file)
self.writer = writer self.writer = writer
self.log = logger self.log = logger
@@ -220,8 +224,8 @@ class Restore(admintool.AdminTool):
if not os.path.isabs(self.backup_dir): if not os.path.isabs(self.backup_dir):
self.backup_dir = os.path.join(paths.IPA_BACKUP_DIR, self.backup_dir) self.backup_dir = os.path.join(paths.IPA_BACKUP_DIR, self.backup_dir)
self.log.info("Preparing restore from %s on %s", logger.info("Preparing restore from %s on %s",
self.backup_dir, FQDN) self.backup_dir, FQDN)
self.header = os.path.join(self.backup_dir, 'header') self.header = os.path.join(self.backup_dir, 'header')
@@ -281,8 +285,8 @@ class Restore(admintool.AdminTool):
raise admintool.ScriptError( raise admintool.ScriptError(
"Cannot restore a data backup into an empty system") "Cannot restore a data backup into an empty system")
self.log.info("Performing %s restore from %s backup" % logger.info("Performing %s restore from %s backup",
(restore_type, self.backup_type)) restore_type, self.backup_type)
if self.backup_host != FQDN: if self.backup_host != FQDN:
raise admintool.ScriptError( raise admintool.ScriptError(
@@ -290,11 +294,11 @@ class Restore(admintool.AdminTool):
(FQDN, self.backup_host)) (FQDN, self.backup_host))
if self.backup_ipa_version != str(version.VERSION): if self.backup_ipa_version != str(version.VERSION):
self.log.warning( logger.warning(
"Restoring data from a different release of IPA.\n" "Restoring data from a different release of IPA.\n"
"Data is version %s.\n" "Data is version %s.\n"
"Server is running %s." % "Server is running %s.",
(self.backup_ipa_version, str(version.VERSION))) self.backup_ipa_version, str(version.VERSION))
if (not options.unattended and if (not options.unattended and
not user_input("Continue to restore?", False)): not user_input("Continue to restore?", False)):
raise admintool.ScriptError("Aborted") raise admintool.ScriptError("Aborted")
@@ -349,30 +353,30 @@ class Restore(admintool.AdminTool):
not user_input("Restoring data will overwrite existing live data. Continue to restore?", False)): not user_input("Restoring data will overwrite existing live data. Continue to restore?", False)):
raise admintool.ScriptError("Aborted") raise admintool.ScriptError("Aborted")
self.log.info( logger.info(
"Each master will individually need to be re-initialized or") "Each master will individually need to be re-initialized or")
self.log.info( logger.info(
"re-created from this one. The replication agreements on") "re-created from this one. The replication agreements on")
self.log.info( logger.info(
"masters running IPA 3.1 or earlier will need to be manually") "masters running IPA 3.1 or earlier will need to be manually")
self.log.info( logger.info(
"re-enabled. See the man page for details.") "re-enabled. See the man page for details.")
self.log.info("Disabling all replication.") logger.info("Disabling all replication.")
self.disable_agreements() self.disable_agreements()
if restore_type != 'FULL': if restore_type != 'FULL':
if not options.online: if not options.online:
self.log.info('Stopping Directory Server') logger.info('Stopping Directory Server')
dirsrv.stop(capture_output=False) dirsrv.stop(capture_output=False)
else: else:
self.log.info('Starting Directory Server') logger.info('Starting Directory Server')
dirsrv.start(capture_output=False) dirsrv.start(capture_output=False)
else: else:
self.log.info('Stopping IPA services') logger.info('Stopping IPA services')
result = run(['ipactl', 'stop'], raiseonerr=False) result = run(['ipactl', 'stop'], raiseonerr=False)
if result.returncode not in [0, 6]: if result.returncode not in [0, 6]:
self.log.warning('Stopping IPA failed: %s' % result.error_log) logger.warning('Stopping IPA failed: %s', result.error_log)
self.restore_selinux_booleans() self.restore_selinux_booleans()
@@ -394,7 +398,7 @@ class Restore(admintool.AdminTool):
if restore_type != 'FULL': if restore_type != 'FULL':
if not options.online: if not options.online:
self.log.info('Starting Directory Server') logger.info('Starting Directory Server')
dirsrv.start(capture_output=False) dirsrv.start(capture_output=False)
else: else:
# restore access controll configuration # restore access controll configuration
@@ -406,12 +410,12 @@ class Restore(admintool.AdminTool):
services.knownservices.pki_tomcatd.enable() services.knownservices.pki_tomcatd.enable()
services.knownservices.pki_tomcatd.disable() services.knownservices.pki_tomcatd.disable()
self.log.info('Restarting GSS-proxy') logger.info('Restarting GSS-proxy')
gssproxy = services.service('gssproxy', api) gssproxy = services.service('gssproxy', api)
gssproxy.reload_or_restart() gssproxy.reload_or_restart()
self.log.info('Starting IPA services') logger.info('Starting IPA services')
run(['ipactl', 'start']) run(['ipactl', 'start'])
self.log.info('Restarting SSSD') logger.info('Restarting SSSD')
sssd = services.service('sssd', api) sssd = services.service('sssd', api)
sssd.restart() sssd.restart()
http.remove_httpd_ccaches() http.remove_httpd_ccaches()
@@ -421,7 +425,7 @@ class Restore(admintool.AdminTool):
try: try:
os.chdir(cwd) os.chdir(cwd)
except Exception as e: except Exception as e:
self.log.error('Cannot change directory to %s: %s' % (cwd, e)) logger.error('Cannot change directory to %s: %s', cwd, e)
shutil.rmtree(self.top_dir) shutil.rmtree(self.top_dir)
@@ -459,7 +463,8 @@ class Restore(admintool.AdminTool):
try: try:
conn = self.get_connection() conn = self.get_connection()
except Exception as e: except Exception as e:
self.log.error('Unable to get connection, skipping disabling agreements: %s' % e) logger.error('Unable to get connection, skipping disabling '
'agreements: %s', e)
return return
masters = [] masters = []
dn = DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn) dn = DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
@@ -479,7 +484,8 @@ class Restore(admintool.AdminTool):
repl = ReplicationManager(api.env.realm, master, repl = ReplicationManager(api.env.realm, master,
self.dirman_password) self.dirman_password)
except Exception as e: except Exception as e:
self.log.critical("Unable to disable agreement on %s: %s" % (master, e)) logger.critical("Unable to disable agreement on %s: %s",
master, e)
continue continue
master_dn = DN(('cn', master), ('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn) master_dn = DN(('cn', master), ('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
@@ -496,7 +502,8 @@ class Restore(admintool.AdminTool):
for rep in host_entries] for rep in host_entries]
for host in hosts: for host in hosts:
self.log.info('Disabling replication agreement on %s to %s' % (master, host)) logger.info('Disabling replication agreement on %s to %s',
master, host)
repl.disable_agreement(host) repl.disable_agreement(host)
if 'CA' in services_cns: if 'CA' in services_cns:
@@ -504,14 +511,16 @@ class Restore(admintool.AdminTool):
repl = get_cs_replication_manager(api.env.realm, master, repl = get_cs_replication_manager(api.env.realm, master,
self.dirman_password) self.dirman_password)
except Exception as e: except Exception as e:
self.log.critical("Unable to disable agreement on %s: %s" % (master, e)) logger.critical("Unable to disable agreement on %s: %s",
master, e)
continue continue
host_entries = repl.find_ipa_replication_agreements() host_entries = repl.find_ipa_replication_agreements()
hosts = [rep.single_value.get('nsds5replicahost') hosts = [rep.single_value.get('nsds5replicahost')
for rep in host_entries] for rep in host_entries]
for host in hosts: for host in hosts:
self.log.info('Disabling CA replication agreement on %s to %s' % (master, host)) logger.info('Disabling CA replication agreement on %s to '
'%s', master, host)
repl.hostnames = [master, host] repl.hostnames = [master, host]
repl.disable_agreement(host) repl.disable_agreement(host)
@@ -522,7 +531,7 @@ class Restore(admintool.AdminTool):
If executed online create a task and wait for it to complete. If executed online create a task and wait for it to complete.
''' '''
self.log.info('Restoring from %s in %s' % (backend, instance)) logger.info('Restoring from %s in %s', backend, instance)
cn = time.strftime('import_%Y_%m_%d_%H_%M_%S') cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config')) dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))
@@ -542,7 +551,7 @@ class Restore(admintool.AdminTool):
with open(ldiffile, 'wb') as out_file: with open(ldiffile, 'wb') as out_file:
ldif_writer = ldif.LDIFWriter(out_file) ldif_writer = ldif.LDIFWriter(out_file)
with open(srcldiffile, 'rb') as in_file: with open(srcldiffile, 'rb') as in_file:
ldif_parser = RemoveRUVParser(in_file, ldif_writer, self.log) ldif_parser = RemoveRUVParser(in_file, ldif_writer)
ldif_parser.parse() ldif_parser.parse()
# Make sure the modified ldiffile is owned by DS_USER # Make sure the modified ldiffile is owned by DS_USER
@@ -565,10 +574,10 @@ class Restore(admintool.AdminTool):
try: try:
conn.add_entry(ent) conn.add_entry(ent)
except Exception as e: except Exception as e:
self.log.error("Unable to bind to LDAP server: %s" % e) logger.error("Unable to bind to LDAP server: %s", e)
return return
self.log.info("Waiting for LDIF to finish") logger.info("Waiting for LDIF to finish")
wait_for_task(conn, dn) wait_for_task(conn, dn)
else: else:
try: try:
@@ -582,7 +591,7 @@ class Restore(admintool.AdminTool):
'-n', backend] '-n', backend]
result = run(args, raiseonerr=False) result = run(args, raiseonerr=False)
if result.returncode != 0: if result.returncode != 0:
self.log.critical("ldif2db failed: %s" % result.error_log) logger.critical("ldif2db failed: %s", result.error_log)
def bak2db(self, instance, backend, online=True): def bak2db(self, instance, backend, online=True):
@@ -598,9 +607,9 @@ class Restore(admintool.AdminTool):
to treat ipaca specially. to treat ipaca specially.
''' '''
if backend is not None: if backend is not None:
self.log.info('Restoring %s in %s' % (backend, instance)) logger.info('Restoring %s in %s', backend, instance)
else: else:
self.log.info('Restoring %s' % instance) logger.info('Restoring %s', instance)
cn = time.strftime('restore_%Y_%m_%d_%H_%M_%S') cn = time.strftime('restore_%Y_%m_%d_%H_%M_%S')
@@ -626,7 +635,7 @@ class Restore(admintool.AdminTool):
raise admintool.ScriptError('Unable to bind to LDAP server: %s' raise admintool.ScriptError('Unable to bind to LDAP server: %s'
% e) % e)
self.log.info("Waiting for restore to finish") logger.info("Waiting for restore to finish")
wait_for_task(conn, dn) wait_for_task(conn, dn)
else: else:
args = [paths.BAK2DB, args = [paths.BAK2DB,
@@ -637,7 +646,7 @@ class Restore(admintool.AdminTool):
args.append(backend) args.append(backend)
result = run(args, raiseonerr=False) result = run(args, raiseonerr=False)
if result.returncode != 0: if result.returncode != 0:
self.log.critical("bak2db failed: %s" % result.error_log) logger.critical("bak2db failed: %s", result.error_log)
def restore_default_conf(self): def restore_default_conf(self):
@@ -659,8 +668,8 @@ class Restore(admintool.AdminTool):
result = run(args, raiseonerr=False) result = run(args, raiseonerr=False)
if result.returncode != 0: if result.returncode != 0:
self.log.critical('Restoring %s failed: %s' % logger.critical('Restoring %s failed: %s',
(paths.IPA_DEFAULT_CONF, result.error_log)) paths.IPA_DEFAULT_CONF, result.error_log)
os.chdir(cwd) os.chdir(cwd)
def remove_old_files(self): def remove_old_files(self):
@@ -673,15 +682,14 @@ class Restore(admintool.AdminTool):
shutil.rmtree(d) shutil.rmtree(d)
except OSError as e: except OSError as e:
if e.errno != 2: # 2: dir does not exist if e.errno != 2: # 2: dir does not exist
self.log.warning("Could not remove directory: %s (%s)", logger.warning("Could not remove directory: %s (%s)", d, e)
d, e)
for f in self.FILES_TO_BE_REMOVED: for f in self.FILES_TO_BE_REMOVED:
try: try:
os.remove(f) os.remove(f)
except OSError as e: except OSError as e:
if e.errno != 2: # 2: file does not exist if e.errno != 2: # 2: file does not exist
self.log.warning("Could not remove file: %s (%s)", f, e) logger.warning("Could not remove file: %s (%s)", f, e)
def file_restore(self, nologs=False): def file_restore(self, nologs=False):
''' '''
@@ -690,7 +698,7 @@ class Restore(admintool.AdminTool):
This MUST be done offline because we directly backup the 389-ds This MUST be done offline because we directly backup the 389-ds
databases. databases.
''' '''
self.log.info("Restoring files") logger.info("Restoring files")
cwd = os.getcwd() cwd = os.getcwd()
os.chdir('/') os.chdir('/')
args = ['tar', args = ['tar',
@@ -705,7 +713,7 @@ class Restore(admintool.AdminTool):
result = run(args, raiseonerr=False) result = run(args, raiseonerr=False)
if result.returncode != 0: if result.returncode != 0:
self.log.critical('Restoring files failed: %s', result.error_log) logger.critical('Restoring files failed: %s', result.error_log)
os.chdir(cwd) os.chdir(cwd)
@@ -750,7 +758,7 @@ class Restore(admintool.AdminTool):
encrypt = True encrypt = True
if encrypt: if encrypt:
self.log.info('Decrypting %s' % filename) logger.info('Decrypting %s', filename)
filename = decrypt_file(self.dir, filename, keyring) filename = decrypt_file(self.dir, filename, keyring)
os.chdir(self.dir) os.chdir(self.dir)
@@ -792,20 +800,20 @@ class Restore(admintool.AdminTool):
try: try:
pent = pwd.getpwnam(constants.PKI_USER) pent = pwd.getpwnam(constants.PKI_USER)
except KeyError: except KeyError:
self.log.debug("No %s user exists, skipping CA directory creation", logger.debug("No %s user exists, skipping CA directory creation",
constants.PKI_USER) constants.PKI_USER)
return return
self.log.debug('Creating log directories for dogtag') logger.debug('Creating log directories for dogtag')
for dir in dirs: for dir in dirs:
try: try:
self.log.debug('Creating %s' % dir) logger.debug('Creating %s', dir)
os.mkdir(dir) os.mkdir(dir)
os.chmod(dir, 0o770) os.chmod(dir, 0o770)
os.chown(dir, pent.pw_uid, pent.pw_gid) os.chown(dir, pent.pw_uid, pent.pw_gid)
tasks.restore_context(dir) tasks.restore_context(dir)
except Exception as e: except Exception as e:
# This isn't so fatal as to side-track the restore # This isn't so fatal as to side-track the restore
self.log.error('Problem with %s: %s' % (dir, e)) logger.error('Problem with %s: %s', dir, e)
def restore_selinux_booleans(self): def restore_selinux_booleans(self):
bools = dict(constants.SELINUX_BOOLEAN_HTTPD) bools = dict(constants.SELINUX_BOOLEAN_HTTPD)
@@ -813,13 +821,13 @@ class Restore(admintool.AdminTool):
if adtrustinstance: if adtrustinstance:
bools.update(constants.SELINUX_BOOLEAN_ADTRUST) bools.update(constants.SELINUX_BOOLEAN_ADTRUST)
else: else:
self.log.error( logger.error(
'The AD trust package was not found, ' 'The AD trust package was not found, '
'not setting SELinux booleans.') 'not setting SELinux booleans.')
try: try:
tasks.set_selinux_booleans(bools) tasks.set_selinux_booleans(bools)
except ipapython.errors.SetseboolError as e: except ipapython.errors.SetseboolError as e:
self.log.error('%s', e) logger.error('%s', e)
def cert_restore_prepare(self): def cert_restore_prepare(self):
cainstance.CAInstance().stop_tracking_certificates() cainstance.CAInstance().stop_tracking_certificates()
@@ -838,7 +846,7 @@ class Restore(admintool.AdminTool):
try: try:
ipautil.backup_file(filename) ipautil.backup_file(filename)
except OSError as e: except OSError as e:
self.log.error("Failed to backup %s: %s" % (filename, e)) logger.error("Failed to backup %s: %s", filename, e)
tasks.remove_ca_certs_from_systemwide_ca_store() tasks.remove_ca_certs_from_systemwide_ca_store()
@@ -846,7 +854,7 @@ class Restore(admintool.AdminTool):
try: try:
update_ipa_nssdb() update_ipa_nssdb()
except RuntimeError as e: except RuntimeError as e:
self.log.error("%s", e) logger.error("%s", e)
tasks.reload_systemwide_ca_store() tasks.reload_systemwide_ca_store()
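With the `logger` parameter gone from RemoveRUVParser, the restore code above constructs it as RemoveRUVParser(in_file, ldif_writer) and the parser logs through the module logger. A reduced stand-in that keeps the same shape without depending on python-ldap:

    import logging

    logger = logging.getLogger(__name__)


    class FilteringParser(object):
        """Toy stand-in for RemoveRUVParser: it logs through the module
        logger instead of one injected via the constructor."""

        def __init__(self, lines, writer):
            # formerly __init__(self, input_file, writer, logger)
            self.lines = lines
            self.writer = writer

        def parse(self):
            for line in self.lines:
                if line.lower().startswith("nsds50ruv"):
                    logger.debug("Removing RUV line: %s", line)
                    continue
                self.writer.append(line)


    if __name__ == "__main__":
        logging.basicConfig(level=logging.DEBUG)
        kept = []
        FilteringParser(["dn: cn=test", "nsds50ruv: {replica 4}"], kept).parse()
        print(kept)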
View File
@@ -2,12 +2,16 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license # Copyright (C) 2015 FreeIPA Contributors see COPYING for license
# #
import logging
from ipalib import api from ipalib import api
from ipaplatform.paths import paths from ipaplatform.paths import paths
from ipapython import admintool from ipapython import admintool
from ipaserver.install import installutils from ipaserver.install import installutils
from ipaserver.install import server from ipaserver.install import server
logger = logging.getLogger(__name__)
class ServerUpgrade(admintool.AdminTool): class ServerUpgrade(admintool.AdminTool):
log_file_name = paths.IPAUPGRADE_LOG log_file_name = paths.IPAUPGRADE_LOG
@@ -50,7 +54,7 @@ class ServerUpgrade(admintool.AdminTool):
def handle_error(self, exception): def handle_error(self, exception):
if not isinstance(exception, SystemExit): if not isinstance(exception, SystemExit):
# do not log this message when ipa is not installed # do not log this message when ipa is not installed
self.log.error("IPA server upgrade failed: Inspect " logger.error("IPA server upgrade failed: Inspect "
"/var/log/ipaupgrade.log and run command " "/var/log/ipaupgrade.log and run command "
"ipa-server-upgrade manually.") "ipa-server-upgrade manually.")
return installutils.handle_error(exception, self.log_file_name) return installutils.handle_error(exception, self.log_file_name)
View File
@@ -17,6 +17,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
# #
import logging
import gssapi import gssapi
import sys import sys
@@ -33,6 +35,8 @@ from ipaserver.install import replication, installutils
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
DEFAULT_TRUST_VIEW_NAME = u'Default Trust View' DEFAULT_TRUST_VIEW_NAME = u'Default Trust View'
@@ -180,10 +184,10 @@ class WinsyncMigrate(admintool.AdminTool):
**kwargs **kwargs
) )
except Exception as e: except Exception as e:
self.log.warning("Migration failed: %s (%s)" logger.warning("Migration failed: %s (%s)",
% (user_identifier, str(e))) user_identifier, str(e))
else: else:
self.log.debug("Migrated: %s" % user_identifier) logger.debug("Migrated: %s", user_identifier)
def find_winsync_users(self): def find_winsync_users(self):
""" """
@@ -198,7 +202,7 @@ class WinsyncMigrate(admintool.AdminTool):
paged_search=True) paged_search=True)
for entry in entries: for entry in entries:
self.log.debug("Discovered entry: %s" % entry) logger.debug("Discovered entry: %s", entry)
return entries return entries
@@ -328,10 +332,11 @@ class WinsyncMigrate(admintool.AdminTool):
) )
def warn_passsync(self): def warn_passsync(self):
self.log.warning("Migration completed. Please note that if PassSync " logger.warning("Migration completed. Please note that if PassSync "
"was configured on the given Active Directory server, " "was configured on the given Active Directory server, "
"it needs to be manually removed, otherwise it may try " "it needs to be manually removed, otherwise it may try "
"to reset password for accounts that are no longer existent.") "to reset password for accounts that are no longer "
"existent.")
@classmethod @classmethod
def main(cls, argv): def main(cls, argv):
View File
@@ -17,6 +17,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
# #
import logging
import os import os
import pwd import pwd
import shutil import shutil
@@ -38,7 +39,8 @@ from ipaserver.install import installutils
from ipaserver.install import ldapupdate from ipaserver.install import ldapupdate
from ipaserver.install.dogtaginstance import DogtagInstance from ipaserver.install.dogtaginstance import DogtagInstance
from ipaserver.plugins import ldap2 from ipaserver.plugins import ldap2
from ipapython.ipa_log_manager import log_mgr
logger = logging.getLogger(__name__)
# When IPA is installed with DNS support, this CNAME should hold all IPA # When IPA is installed with DNS support, this CNAME should hold all IPA
# replicas with KRA configured # replicas with KRA configured
@@ -72,7 +74,6 @@ class KRAInstance(DogtagInstance):
) )
self.basedn = DN(('o', 'kra'), ('o', 'ipaca')) self.basedn = DN(('o', 'kra'), ('o', 'ipaca'))
self.log = log_mgr.get_logger(self)
def configure_instance(self, realm_name, host_name, dm_password, def configure_instance(self, realm_name, host_name, dm_password,
admin_password, pkcs12_info=None, master_host=None, admin_password, pkcs12_info=None, master_host=None,
@@ -293,7 +294,7 @@ class KRAInstance(DogtagInstance):
os.remove(cfg_file) os.remove(cfg_file)
shutil.move(paths.KRA_BACKUP_KEYS_P12, paths.KRACERT_P12) shutil.move(paths.KRA_BACKUP_KEYS_P12, paths.KRACERT_P12)
self.log.debug("completed creating KRA instance") logger.debug("completed creating KRA instance")
def __create_kra_agent(self): def __create_kra_agent(self):
""" """
View File
@@ -23,6 +23,7 @@
# save undo files? # save undo files?
import base64 import base64
import logging
import sys import sys
import uuid import uuid
import platform import platform
@@ -41,11 +42,12 @@ from ipalib import api, create_api
from ipalib import constants from ipalib import constants
from ipaplatform.paths import paths from ipaplatform.paths import paths
from ipapython.dn import DN from ipapython.dn import DN
from ipapython.ipa_log_manager import log_mgr
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
UPDATES_DIR=paths.UPDATES_DIR UPDATES_DIR=paths.UPDATES_DIR
UPDATE_SEARCH_TIME_LIMIT = 30 # seconds UPDATE_SEARCH_TIME_LIMIT = 30 # seconds
@@ -250,7 +252,6 @@ class LDAPUpdate(object):
update format. update format.
''' '''
log_mgr.get_logger(self, True)
self.sub_dict = sub_dict self.sub_dict = sub_dict
self.dm_password = dm_password self.dm_password = dm_password
self.conn = None self.conn = None
@@ -548,8 +549,8 @@ class LDAPUpdate(object):
nsIndexAttribute=[attribute], nsIndexAttribute=[attribute],
) )
self.debug("Creating task to index attribute: %s", attribute) logger.debug("Creating task to index attribute: %s", attribute)
self.debug("Task id: %s", dn) logger.debug("Task id: %s", dn)
self.conn.add_entry(e) self.conn.add_entry(e)
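The ldapupdate hunks replace calls to instance-level helpers (`self.debug`, `self.error`, obtained via `log_mgr.get_logger(self, True)`) with direct module-logger calls. A toy stand-in of the indexing step above (the DN and attribute handling are simplified placeholders):

    import logging

    logger = logging.getLogger(__name__)


    class LDAPUpdateSketch(object):
        """Toy stand-in: the real class previously used per-object helpers
        (self.debug, self.error) and now calls the module logger directly."""

        def create_index_task(self, attribute):
            dn = "cn=index-%s,cn=index,cn=tasks,cn=config" % attribute  # placeholder
            logger.debug("Creating task to index attribute: %s", attribute)
            logger.debug("Task id: %s", dn)
            return dn


    if __name__ == "__main__":
        logging.basicConfig(level=logging.DEBUG)
        LDAPUpdateSketch().create_index_task("uid")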
@@ -571,10 +572,10 @@ class LDAPUpdate(object):
try: try:
entry = self.conn.get_entry(dn, attrlist) entry = self.conn.get_entry(dn, attrlist)
except errors.NotFound as e: except errors.NotFound as e:
self.error("Task not found: %s", dn) logger.error("Task not found: %s", dn)
return return
except errors.DatabaseError as e: except errors.DatabaseError as e:
self.error("Task lookup failure %s", e) logger.error("Task lookup failure %s", e)
return return
status = entry.single_value.get('nstaskstatus') status = entry.single_value.get('nstaskstatus')
@@ -584,10 +585,10 @@ class LDAPUpdate(object):
continue continue
if status.lower().find("finished") > -1: if status.lower().find("finished") > -1:
self.debug("Indexing finished") logger.debug("Indexing finished")
break break
self.debug("Indexing in progress") logger.debug("Indexing in progress")
time.sleep(1) time.sleep(1)
return return
@@ -663,54 +664,76 @@ class LDAPUpdate(object):
entry_values = entry.raw.get(attr, []) entry_values = entry.raw.get(attr, [])
if action == 'remove': if action == 'remove':
self.debug("remove: '%s' from %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr,entry_values)) logger.debug("remove: '%s' from %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
try: try:
entry_values.remove(update_value) entry_values.remove(update_value)
except ValueError: except ValueError:
self.debug( logger.debug(
"remove: '%s' not in %s", "remove: '%s' not in %s",
safe_output(attr, update_value), attr) safe_output(attr, update_value), attr)
else: else:
entry.raw[attr] = entry_values entry.raw[attr] = entry_values
self.debug('remove: updated value %s', safe_output( logger.debug('remove: updated value %s', safe_output(
attr, entry_values)) attr, entry_values))
elif action == 'add': elif action == 'add':
self.debug("add: '%s' to %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr, entry_values)) logger.debug("add: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Remove it, ignoring errors so we can blindly add it later # Remove it, ignoring errors so we can blindly add it later
try: try:
entry_values.remove(update_value) entry_values.remove(update_value)
except ValueError: except ValueError:
pass pass
entry_values.append(update_value) entry_values.append(update_value)
self.debug('add: updated value %s', safe_output(attr, entry_values)) logger.debug('add: updated value %s',
safe_output(attr, entry_values))
entry.raw[attr] = entry_values entry.raw[attr] = entry_values
elif action == 'addifnew': elif action == 'addifnew':
self.debug("addifnew: '%s' to %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr, entry_values)) logger.debug("addifnew: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Only add the attribute if it doesn't exist. Only works # Only add the attribute if it doesn't exist. Only works
# with single-value attributes. Entry must exist. # with single-value attributes. Entry must exist.
if entry.get('objectclass') and len(entry_values) == 0: if entry.get('objectclass') and len(entry_values) == 0:
entry_values.append(update_value) entry_values.append(update_value)
self.debug('addifnew: set %s to %s', attr, safe_output(attr, entry_values)) logger.debug('addifnew: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values entry.raw[attr] = entry_values
elif action == 'addifexist': elif action == 'addifexist':
self.debug("addifexist: '%s' to %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr, entry_values)) logger.debug("addifexist: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Only add the attribute if the entry doesn't exist. We # Only add the attribute if the entry doesn't exist. We
# determine this based on whether it has an objectclass # determine this based on whether it has an objectclass
if entry.get('objectclass'): if entry.get('objectclass'):
entry_values.append(update_value) entry_values.append(update_value)
self.debug('addifexist: set %s to %s', attr, safe_output(attr, entry_values)) logger.debug('addifexist: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values entry.raw[attr] = entry_values
elif action == 'only': elif action == 'only':
self.debug("only: set %s to '%s', current value %s", attr, safe_output(attr, update_value), safe_output(attr, entry_values)) logger.debug("only: set %s to '%s', current value %s",
attr,
safe_output(attr, update_value),
safe_output(attr, entry_values))
if only.get(attr): if only.get(attr):
entry_values.append(update_value) entry_values.append(update_value)
else: else:
entry_values = [update_value] entry_values = [update_value]
only[attr] = True only[attr] = True
entry.raw[attr] = entry_values entry.raw[attr] = entry_values
self.debug('only: updated value %s', safe_output(attr, entry_values)) logger.debug('only: updated value %s',
safe_output(attr, entry_values))
elif action == 'onlyifexist': elif action == 'onlyifexist':
self.debug("onlyifexist: '%s' to %s, current value %s", safe_output(attr, update_value), attr, safe_output(attr, entry_values)) logger.debug("onlyifexist: '%s' to %s, current value %s",
safe_output(attr, update_value),
attr,
safe_output(attr, entry_values))
# Only set the attribute if the entry exist's. We # Only set the attribute if the entry exist's. We
# determine this based on whether it has an objectclass # determine this based on whether it has an objectclass
if entry.get('objectclass'): if entry.get('objectclass'):
@@ -719,7 +742,8 @@ class LDAPUpdate(object):
else: else:
entry_values = [update_value] entry_values = [update_value]
only[attr] = True only[attr] = True
self.debug('onlyifexist: set %s to %s', attr, safe_output(attr, entry_values)) logger.debug('onlyifexist: set %s to %s',
attr, safe_output(attr, entry_values))
entry.raw[attr] = entry_values entry.raw[attr] = entry_values
elif action == 'deleteentry': elif action == 'deleteentry':
# skip this update type, it occurs in __delete_entries() # skip this update type, it occurs in __delete_entries()
@@ -731,24 +755,26 @@ class LDAPUpdate(object):
try: try:
entry_values.remove(old) entry_values.remove(old)
except ValueError: except ValueError:
self.debug('replace: %s not found, skipping', safe_output(attr, old)) logger.debug('replace: %s not found, skipping',
safe_output(attr, old))
else: else:
entry_values.append(new) entry_values.append(new)
self.debug('replace: updated value %s', safe_output(attr, entry_values)) logger.debug('replace: updated value %s',
safe_output(attr, entry_values))
entry.raw[attr] = entry_values entry.raw[attr] = entry_values
return entry return entry
def print_entity(self, e, message=None): def print_entity(self, e, message=None):
"""The entity object currently lacks a str() method""" """The entity object currently lacks a str() method"""
self.debug("---------------------------------------------") logger.debug("---------------------------------------------")
if message: if message:
self.debug("%s", message) logger.debug("%s", message)
self.debug("dn: %s", e.dn) logger.debug("dn: %s", e.dn)
for a, value in e.raw.items(): for a, value in e.raw.items():
self.debug('%s:', a) logger.debug('%s:', a)
for l in value: for l in value:
self.debug("\t%s", safe_output(a, l)) logger.debug("\t%s", safe_output(a, l))
def _update_record(self, update): def _update_record(self, update):
found = False found = False
@@ -763,15 +789,15 @@ class LDAPUpdate(object):
raise BadSyntax("More than 1 entry returned on a dn search!? %s" % new_entry.dn) raise BadSyntax("More than 1 entry returned on a dn search!? %s" % new_entry.dn)
entry = e[0] entry = e[0]
found = True found = True
self.debug("Updating existing entry: %s", entry.dn) logger.debug("Updating existing entry: %s", entry.dn)
except errors.NotFound: except errors.NotFound:
# Doesn't exist, start with the default entry # Doesn't exist, start with the default entry
entry = new_entry entry = new_entry
self.debug("New entry: %s", entry.dn) logger.debug("New entry: %s", entry.dn)
except errors.DatabaseError: except errors.DatabaseError:
# Doesn't exist, start with the default entry # Doesn't exist, start with the default entry
entry = new_entry entry = new_entry
self.debug("New entry, using default value: %s", entry.dn) logger.debug("New entry, using default value: %s", entry.dn)
self.print_entity(entry, "Initial value") self.print_entity(entry, "Initial value")
@@ -796,13 +822,13 @@ class LDAPUpdate(object):
except errors.NotFound: except errors.NotFound:
# parent entry of the added entry does not exist # parent entry of the added entry does not exist
# this may not be an error (e.g. entries in NIS container) # this may not be an error (e.g. entries in NIS container)
self.error("Parent DN of %s may not exist, cannot " logger.error("Parent DN of %s may not exist, cannot "
"create the entry", entry.dn) "create the entry", entry.dn)
return return
added = True added = True
self.modified = True self.modified = True
except Exception as e: except Exception as e:
self.error("Add failure %s", e) logger.error("Add failure %s", e)
else: else:
# Update LDAP # Update LDAP
try: try:
@@ -812,19 +838,19 @@ class LDAPUpdate(object):
safe_changes = [] safe_changes = []
for (type, attr, values) in changes: for (type, attr, values) in changes:
safe_changes.append((type, attr, safe_output(attr, values))) safe_changes.append((type, attr, safe_output(attr, values)))
self.debug("%s" % safe_changes) logger.debug("%s", safe_changes)
self.debug("Updated %d" % updated) logger.debug("Updated %d", updated)
if updated: if updated:
self.conn.update_entry(entry) self.conn.update_entry(entry)
self.debug("Done") logger.debug("Done")
except errors.EmptyModlist: except errors.EmptyModlist:
self.debug("Entry already up-to-date") logger.debug("Entry already up-to-date")
updated = False updated = False
except errors.DatabaseError as e: except errors.DatabaseError as e:
self.error("Update failed: %s", e) logger.error("Update failed: %s", e)
updated = False updated = False
except errors.ACIError as e: except errors.ACIError as e:
self.error("Update failed: %s", e) logger.error("Update failed: %s", e)
updated = False updated = False
if updated: if updated:
@@ -844,14 +870,14 @@ class LDAPUpdate(object):
dn = updates['dn'] dn = updates['dn']
try: try:
self.debug("Deleting entry %s", dn) logger.debug("Deleting entry %s", dn)
self.conn.delete_entry(dn) self.conn.delete_entry(dn)
self.modified = True self.modified = True
except errors.NotFound as e: except errors.NotFound as e:
self.debug("%s did not exist:%s", dn, e) logger.debug("%s did not exist:%s", dn, e)
self.modified = True self.modified = True
except errors.DatabaseError as e: except errors.DatabaseError as e:
self.error("Delete failed: %s", e) logger.error("Delete failed: %s", e)
def get_all_files(self, root, recursive=False): def get_all_files(self, root, recursive=False):
"""Get all update files""" """Get all update files"""
@@ -866,7 +892,7 @@ class LDAPUpdate(object):
return f return f
def _run_update_plugin(self, plugin_name): def _run_update_plugin(self, plugin_name):
self.log.debug("Executing upgrade plugin: %s", plugin_name) logger.debug("Executing upgrade plugin: %s", plugin_name)
restart_ds, updates = self.api.Updater[plugin_name]() restart_ds, updates = self.api.Updater[plugin_name]()
if updates: if updates:
self._run_updates(updates) self._run_updates(updates)
@@ -912,10 +938,10 @@ class LDAPUpdate(object):
for f in upgrade_files: for f in upgrade_files:
try: try:
self.debug("Parsing update file '%s'" % f) logger.debug("Parsing update file '%s'", f)
data = self.read_file(f) data = self.read_file(f)
except Exception as e: except Exception as e:
self.error("error reading update file '%s'", f) logger.error("error reading update file '%s'", f)
raise RuntimeError(e) raise RuntimeError(e)
self.parse_update_file(f, data, all_updates) self.parse_update_file(f, data, all_updates)
@@ -933,5 +959,5 @@ class LDAPUpdate(object):
self.conn = None self.conn = None
def restart_ds(self): def restart_ds(self):
self.log.debug('Restarting directory server to apply updates') logger.debug('Restarting directory server to apply updates')
installutils.restart_dirsrv() installutils.restart_dirsrv()
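The change applied throughout this file replaces the per-object logger helpers (self.debug, self.error, self.log) with a single module-level logger. A minimal sketch of the resulting pattern; the class, method, and argument names here are invented for illustration and are not part of this commit:

import logging

logger = logging.getLogger(__name__)


class ExampleUpdater(object):
    # Hypothetical stand-in for an LDAPUpdate-style class.
    def update_entry(self, dn, apply_change):
        # The module-level logger replaces the old self.debug/self.error helpers.
        logger.debug("Updating existing entry: %s", dn)
        try:
            apply_change(dn)
        except ValueError as e:
            logger.error("Update failed: %s", e)


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    ExampleUpdater().update_entry("cn=index,cn=tasks", lambda dn: None)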

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import Registry, errors from ipalib import Registry, errors
from ipalib import Updater from ipalib import Updater
from ipapython.dn import DN from ipapython.dn import DN
@@ -24,6 +26,8 @@ from ipapython.ipa_log_manager import root_logger
from ipaserver.install import sysupgrade from ipaserver.install import sysupgrade
from ipaserver.install.adtrustinstance import ADTRUSTInstance from ipaserver.install.adtrustinstance import ADTRUSTInstance
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
DEFAULT_ID_RANGE_SIZE = 200000 DEFAULT_ID_RANGE_SIZE = 200000
@@ -146,7 +150,7 @@ class update_default_trust_view(Updater):
# First, see if trusts are enabled on the server # First, see if trusts are enabled on the server
if not self.api.Command.adtrust_is_enabled()['result']: if not self.api.Command.adtrust_is_enabled()['result']:
self.log.debug('AD Trusts are not enabled on this server') logger.debug('AD Trusts are not enabled on this server')
return False, [] return False, []
# Second, make sure the Default Trust View does not exist yet # Second, make sure the Default Trust View does not exist yet
@@ -155,7 +159,7 @@ class update_default_trust_view(Updater):
except errors.NotFound: except errors.NotFound:
pass pass
else: else:
self.log.debug('Default Trust View already present on this server') logger.debug('Default Trust View already present on this server')
return False, [] return False, []
# We have a server with AD trust support without Default Trust View. # We have a server with AD trust support without Default Trust View.
@@ -200,7 +204,7 @@ class update_sigden_extdom_broken_config(Updater):
try: try:
entry = ldap.get_entry(dn, attrs_list=[basedn_attr]) entry = ldap.get_entry(dn, attrs_list=[basedn_attr])
except errors.NotFound: except errors.NotFound:
self.log.debug("configuration for %s not found, skipping", dn) logger.debug("configuration for %s not found, skipping", dn)
else: else:
configured_suffix = entry.single_value.get(basedn_attr) configured_suffix = entry.single_value.get(basedn_attr)
if configured_suffix is None: if configured_suffix is None:
@@ -212,19 +216,19 @@ class update_sigden_extdom_broken_config(Updater):
elif configured_suffix == "$SUFFIX": elif configured_suffix == "$SUFFIX":
# configured value is wrong, fix it # configured value is wrong, fix it
entry.single_value[basedn_attr] = str(self.api.env.basedn) entry.single_value[basedn_attr] = str(self.api.env.basedn)
self.log.debug("updating attribute %s of %s to correct " logger.debug("updating attribute %s of %s to correct "
"value %s", basedn_attr, dn, "value %s",
self.api.env.basedn) basedn_attr, dn, self.api.env.basedn)
ldap.update_entry(entry) ldap.update_entry(entry)
modified = True modified = True
else: else:
self.log.debug("configured basedn for %s is okay", dn) logger.debug("configured basedn for %s is okay", dn)
return modified return modified
def execute(self, **options): def execute(self, **options):
if sysupgrade.get_upgrade_state('sidgen', 'config_basedn_updated'): if sysupgrade.get_upgrade_state('sidgen', 'config_basedn_updated'):
self.log.debug("Already done, skipping") logger.debug("Already done, skipping")
return False, () return False, ()
restart = False restart = False
@@ -250,7 +254,7 @@ class update_sids(Updater):
ldap = self.api.Backend.ldap2 ldap = self.api.Backend.ldap2
if sysupgrade.get_upgrade_state('sidgen', 'update_sids') is not True: if sysupgrade.get_upgrade_state('sidgen', 'update_sids') is not True:
self.log.debug("SIDs do not need to be generated") logger.debug("SIDs do not need to be generated")
return False, () return False, ()
# check if IPA domain for AD trust has been created, and if we need to # check if IPA domain for AD trust has been created, and if we need to
@@ -264,8 +268,8 @@ class update_sids(Updater):
try: try:
entry = ldap.get_entry(domain_IPA_AD_dn, attrs_list=[attr_name]) entry = ldap.get_entry(domain_IPA_AD_dn, attrs_list=[attr_name])
except errors.NotFound: except errors.NotFound:
self.log.debug("IPA domain object %s is not configured", logger.debug("IPA domain object %s is not configured",
domain_IPA_AD_dn) domain_IPA_AD_dn)
sysupgrade.set_upgrade_state('sidgen', 'update_sids', False) sysupgrade.set_upgrade_state('sidgen', 'update_sids', False)
return False, () return False, ()
else: else:
@@ -286,9 +290,9 @@ class update_sids(Updater):
try: try:
ldap.add_entry(task_entry) ldap.add_entry(task_entry)
except errors.DuplicateEntry: except errors.DuplicateEntry:
self.log.debug("sidgen task already created") logger.debug("sidgen task already created")
else: else:
self.log.debug("sidgen task has been created") logger.debug("sidgen task has been created")
# we have to check all trusts domains which may been affected by the # we have to check all trusts domains which may been affected by the
# bug. Symptom is missing 'ipaNTSecurityIdentifier' attribute # bug. Symptom is missing 'ipaNTSecurityIdentifier' attribute
@@ -307,11 +311,11 @@ class update_sids(Updater):
pass pass
else: else:
if truncated: if truncated:
self.log.warning("update_sids: Search results were truncated") logger.warning("update_sids: Search results were truncated")
for entry in trust_domain_entries: for entry in trust_domain_entries:
domain = entry.single_value["cn"] domain = entry.single_value["cn"]
self.log.error( logger.error(
"Your trust to %s is broken. Please re-create it by " "Your trust to %s is broken. Please re-create it by "
"running 'ipa trust-add' again.", domain) "running 'ipa trust-add' again.", domain)
@@ -331,7 +335,7 @@ class update_tdo_gidnumber(Updater):
# First, see if trusts are enabled on the server # First, see if trusts are enabled on the server
if not self.api.Command.adtrust_is_enabled()['result']: if not self.api.Command.adtrust_is_enabled()['result']:
self.log.debug('AD Trusts are not enabled on this server') logger.debug('AD Trusts are not enabled on this server')
return False, [] return False, []
# Read the gidnumber of the fallback group # Read the gidnumber of the fallback group
@@ -343,13 +347,13 @@ class update_tdo_gidnumber(Updater):
entry = ldap.get_entry(dn, ['gidnumber']) entry = ldap.get_entry(dn, ['gidnumber'])
gidNumber = entry.get('gidnumber') gidNumber = entry.get('gidnumber')
except errors.NotFound: except errors.NotFound:
self.log.error("{0} not found".format( logger.error("%s not found",
ADTRUSTInstance.FALLBACK_GROUP_NAME)) ADTRUSTInstance.FALLBACK_GROUP_NAME)
return False, () return False, ()
if not gidNumber: if not gidNumber:
self.log.error("{0} does not have a gidnumber".format( logger.error("%s does not have a gidnumber",
ADTRUSTInstance.FALLBACK_GROUP_NAME)) ADTRUSTInstance.FALLBACK_GROUP_NAME)
return False, () return False, ()
# For each trusted domain object, add gidNumber # For each trusted domain object, add gidNumber
@@ -366,14 +370,14 @@ class update_tdo_gidnumber(Updater):
try: try:
tdo['gidnumber'] = gidNumber tdo['gidnumber'] = gidNumber
ldap.update_entry(tdo) ldap.update_entry(tdo)
self.log.debug("Added gidnumber {0} to {1}".format( logger.debug("Added gidnumber %s to %s",
gidNumber, tdo.dn)) gidNumber, tdo.dn)
except Exception: except Exception:
self.log.warning( logger.warning(
"Failed to add gidnumber to {0}".format(tdo.dn)) "Failed to add gidnumber to %s", tdo.dn)
except errors.NotFound: except errors.NotFound:
self.log.debug("No trusted domain object to update") logger.debug("No trusted domain object to update")
return False, () return False, ()
return False, () return False, ()
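Several calls rewritten above also drop str.format()/% interpolation in favour of passing values as logger arguments, so the message is only formatted when the record is actually emitted. A small sketch of that difference; the variable is made up for the example:

import logging

logger = logging.getLogger(__name__)

fallback_group = "Default SMB Group"  # hypothetical value for the sketch

# Eager: the string is built even when DEBUG records are discarded.
logger.debug("{0} not found".format(fallback_group))

# Lazy: interpolation is deferred until a handler accepts the record.
logger.debug("%s not found", fallback_group)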

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipaserver.install import installutils, cainstance from ipaserver.install import installutils, cainstance
from ipalib import errors from ipalib import errors
from ipalib import Updater from ipalib import Updater
@@ -25,6 +27,8 @@ from ipalib.plugable import Registry
from ipaplatform.paths import paths from ipaplatform.paths import paths
from ipapython.dn import DN from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@register() @register()
@@ -36,7 +40,7 @@ class update_ca_renewal_master(Updater):
def execute(self, **options): def execute(self, **options):
ca = cainstance.CAInstance(self.api.env.realm) ca = cainstance.CAInstance(self.api.env.realm)
if not ca.is_configured(): if not ca.is_configured():
self.debug("CA is not configured on this host") logger.debug("CA is not configured on this host")
return False, [] return False, []
ldap = self.api.Backend.ldap2 ldap = self.api.Backend.ldap2
@@ -50,7 +54,7 @@ class update_ca_renewal_master(Updater):
except errors.NotFound: except errors.NotFound:
pass pass
else: else:
self.debug("found CA renewal master %s", entries[0].dn[1].value) logger.debug("found CA renewal master %s", entries[0].dn[1].value)
master = False master = False
updates = [] updates = []
@@ -78,11 +82,11 @@ class update_ca_renewal_master(Updater):
} }
request_id = certmonger.get_request_id(criteria) request_id = certmonger.get_request_id(criteria)
if request_id is not None: if request_id is not None:
self.debug("found certmonger request for RA cert") logger.debug("found certmonger request for RA cert")
ca_name = certmonger.get_request_value(request_id, 'ca-name') ca_name = certmonger.get_request_value(request_id, 'ca-name')
if ca_name is None: if ca_name is None:
self.warning( logger.warning(
"certmonger request for RA cert is missing ca_name, " "certmonger request for RA cert is missing ca_name, "
"assuming local CA is renewal slave") "assuming local CA is renewal slave")
return False, [] return False, []
@@ -95,12 +99,12 @@ class update_ca_renewal_master(Updater):
elif ca_name == 'dogtag-ipa-ca-renew-agent': elif ca_name == 'dogtag-ipa-ca-renew-agent':
return False, [] return False, []
else: else:
self.warning( logger.warning(
"certmonger request for RA cert has unknown ca_name '%s', " "certmonger request for RA cert has unknown ca_name '%s', "
"assuming local CA is renewal slave", ca_name) "assuming local CA is renewal slave", ca_name)
return False, [] return False, []
else: else:
self.debug("certmonger request for RA cert not found") logger.debug("certmonger request for RA cert not found")
config = installutils.get_directive( config = installutils.get_directive(
paths.CA_CS_CFG_PATH, 'subsystem.select', '=') paths.CA_CS_CFG_PATH, 'subsystem.select', '=')
@@ -110,7 +114,7 @@ class update_ca_renewal_master(Updater):
elif config == 'Clone': elif config == 'Clone':
return False, [] return False, []
else: else:
self.warning( logger.warning(
"CS.cfg has unknown subsystem.select value '%s', " "CS.cfg has unknown subsystem.select value '%s', "
"assuming local CA is renewal slave", config) "assuming local CA is renewal slave", config)
return (False, False, []) return (False, False, [])

@@ -19,6 +19,8 @@
from __future__ import absolute_import from __future__ import absolute_import
import logging
import dns.exception import dns.exception
import re import re
import traceback import traceback
@@ -35,6 +37,8 @@ from ipaserver.install import sysupgrade
from ipaserver.install.bindinstance import ensure_dnsserver_container_exists from ipaserver.install.bindinstance import ensure_dnsserver_container_exists
from ipaserver.plugins.dns import dns_container_exists from ipaserver.plugins.dns import dns_container_exists
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -65,8 +69,8 @@ class DNSUpdater(Updater):
@property @property
def ldif_writer(self): def ldif_writer(self):
if not self._ldif_writer: if not self._ldif_writer:
self.log.info('Original zones will be saved in LDIF format in ' logger.info('Original zones will be saved in LDIF format in '
'%s file' % self.backup_path) '%s file', self.backup_path)
self._ldif_writer = LDIFWriter(open(self.backup_path, 'w')) self._ldif_writer = LDIFWriter(open(self.backup_path, 'w'))
return self._ldif_writer return self._ldif_writer
@@ -137,7 +141,7 @@ class update_ipaconfigstring_dnsversion_to_ipadnsversion(Updater):
# version data are already migrated # version data are already migrated
return False, [] return False, []
self.log.debug('Migrating DNS ipaConfigString to ipaDNSVersion') logger.debug('Migrating DNS ipaConfigString to ipaDNSVersion')
container_entry['objectclass'].append('ipadnscontainer') container_entry['objectclass'].append('ipadnscontainer')
version = 0 version = 0
for config_option in container_entry.get("ipaConfigString", []): for config_option in container_entry.get("ipaConfigString", []):
@@ -146,12 +150,12 @@ class update_ipaconfigstring_dnsversion_to_ipadnsversion(Updater):
if matched: if matched:
version = int(matched.group("version")) version = int(matched.group("version"))
else: else:
self.log.error( logger.error(
'Failed to parse DNS version from ipaConfigString, ' 'Failed to parse DNS version from ipaConfigString, '
'defaulting to version %s', version) 'defaulting to version %s', version)
container_entry['ipadnsversion'] = version container_entry['ipadnsversion'] = version
ldap.update_entry(container_entry) ldap.update_entry(container_entry)
self.log.debug('ipaDNSVersion = %s', version) logger.debug('ipaDNSVersion = %s', version)
return False, [] return False, []
@@ -189,7 +193,7 @@ class update_dnszones(Updater):
try: try:
zones = self.api.Command.dnszone_find(all=True)['result'] zones = self.api.Command.dnszone_find(all=True)['result']
except errors.NotFound: except errors.NotFound:
self.log.debug('No DNS zone to update found') logger.debug('No DNS zone to update found')
return False, [] return False, []
for zone in zones: for zone in zones:
@@ -284,7 +288,7 @@ class update_master_to_dnsforwardzones(DNSUpdater):
# no upgrade is required # no upgrade is required
return False, [] return False, []
self.log.debug('Updating forward zones') logger.debug('Updating forward zones')
# update the DNSVersion, following upgrade can be executed only once # update the DNSVersion, following upgrade can be executed only once
self.api.Command['dnsconfig_mod'](ipadnsversion=1) self.api.Command['dnsconfig_mod'](ipadnsversion=1)
@@ -311,7 +315,7 @@ class update_master_to_dnsforwardzones(DNSUpdater):
pass pass
if not zones: if not zones:
self.log.debug('No DNS zone to update found') logger.debug('No DNS zone to update found')
return False, [] return False, []
zones_to_transform = [] zones_to_transform = []
@@ -326,27 +330,27 @@ class update_master_to_dnsforwardzones(DNSUpdater):
zones_to_transform.append(zone) zones_to_transform.append(zone)
if zones_to_transform: if zones_to_transform:
self.log.info('Zones with specified forwarders with policy different' logger.info('Zones with specified forwarders with policy '
' than none will be transformed to forward zones.') 'different than none will be transformed to forward '
'zones.')
# update # update
for zone in zones_to_transform: for zone in zones_to_transform:
try: try:
self.backup_zone(zone) self.backup_zone(zone)
except Exception: except Exception:
self.log.error('Unable to create backup for zone, ' logger.error('Unable to create backup for zone, '
'terminating zone upgrade') 'terminating zone upgrade')
self.log.error(traceback.format_exc()) logger.error("%s", traceback.format_exc())
return False, [] return False, []
# delete master zone # delete master zone
try: try:
self.api.Command['dnszone_del'](zone['idnsname']) self.api.Command['dnszone_del'](zone['idnsname'])
except Exception as e: except Exception as e:
self.log.error('Transform to forwardzone terminated: ' logger.error('Transform to forwardzone terminated: '
'removing zone %s failed (%s)' % ( 'removing zone %s failed (%s)',
zone['idnsname'][0], e) zone['idnsname'][0], e)
) logger.error("%s", traceback.format_exc())
self.log.error(traceback.format_exc())
continue continue
# create forward zone # create forward zone
@@ -358,11 +362,11 @@ class update_master_to_dnsforwardzones(DNSUpdater):
'skip_overlap_check': True, 'skip_overlap_check': True,
} }
self.api.Command['dnsforwardzone_add'](zone['idnsname'][0], **kw) self.api.Command['dnsforwardzone_add'](zone['idnsname'][0], **kw)
except Exception as e: except Exception:
self.log.error('Transform to forwardzone terminated: creating ' logger.error('Transform to forwardzone terminated: '
'forwardzone %s failed' % 'creating forwardzone %s failed',
zone['idnsname'][0]) zone['idnsname'][0])
self.log.error(traceback.format_exc()) logger.error("%s", traceback.format_exc())
continue continue
# create permission if original zone has one # create permission if original zone has one
@@ -370,14 +374,14 @@ class update_master_to_dnsforwardzones(DNSUpdater):
try: try:
perm_name = self.api.Command['dnsforwardzone_add_permission']( perm_name = self.api.Command['dnsforwardzone_add_permission'](
zone['idnsname'][0])['value'] zone['idnsname'][0])['value']
except Exception as e: except Exception:
self.log.error('Transform to forwardzone terminated: ' logger.error('Transform to forwardzone terminated: '
'Adding managed by permission to forward zone' 'Adding managed by permission to forward '
' %s failed' % zone['idnsname']) 'zone %s failed', zone['idnsname'])
self.log.error(traceback.format_exc()) logger.error("%s", traceback.format_exc())
self.log.info('Zone %s was transformed to forward zone ' logger.info('Zone %s was transformed to forward zone '
' without managed permissions', ' without managed permissions',
zone['idnsname'][0]) zone['idnsname'][0])
continue continue
else: else:
@@ -388,18 +392,20 @@ class update_master_to_dnsforwardzones(DNSUpdater):
try: try:
self.api.Command['permission_add_member'](perm_name, self.api.Command['permission_add_member'](perm_name,
privilege=privileges) privilege=privileges)
except Exception as e: except Exception:
self.log.error('Unable to restore privileges for ' logger.error('Unable to restore privileges '
'permission %s, for zone %s' 'for permission %s, for zone %s',
% (perm_name, zone['idnsname'])) perm_name, zone['idnsname'])
self.log.error(traceback.format_exc()) logger.error("%s", traceback.format_exc())
self.log.info('Zone %s was transformed to forward zone' logger.info('Zone %s was transformed to '
' without restored privileges', 'forward zone without restored '
zone['idnsname'][0]) 'privileges',
zone['idnsname'][0])
continue continue
self.log.debug('Zone %s was sucessfully transformed to forward zone', logger.debug('Zone %s was sucessfully transformed to forward '
zone['idnsname'][0]) 'zone',
zone['idnsname'][0])
return False, [] return False, []
@@ -437,18 +443,18 @@ class update_dnsforward_emptyzones(DNSUpdater):
continue continue
if not logged_once: if not logged_once:
self.log.info('Forward policy for zones conflicting with ' logger.info('Forward policy for zones conflicting with '
'automatic empty zones will be changed to ' 'automatic empty zones will be changed to "only"')
'"only"')
logged_once = True logged_once = True
# backup # backup
try: try:
self.backup_zone(zone) self.backup_zone(zone)
except Exception: except Exception:
self.log.error('Unable to create backup for zone %s, ' logger.error('Unable to create backup for zone %s, '
'terminating zone upgrade', zone['idnsname'][0]) 'terminating zone upgrade',
self.log.error(traceback.format_exc()) zone['idnsname'][0])
logger.error("%s", traceback.format_exc())
continue continue
# change forward policy # change forward policy
@@ -458,13 +464,13 @@ class update_dnsforward_emptyzones(DNSUpdater):
idnsforwardpolicy=u'only' idnsforwardpolicy=u'only'
) )
except Exception as e: except Exception as e:
self.log.error('Forward policy update for zone %s failed ' logger.error('Forward policy update for zone %s failed '
'(%s)' % (zone['idnsname'][0], e)) '(%s)', zone['idnsname'][0], e)
self.log.error(traceback.format_exc()) logger.error("%s", traceback.format_exc())
continue continue
self.log.debug('Zone %s was sucessfully modified to use ' logger.debug('Zone %s was sucessfully modified to use forward '
'forward policy "only"', zone['idnsname'][0]) 'policy "only"', zone['idnsname'][0])
def update_global_ldap_forwarder(self): def update_global_ldap_forwarder(self):
config = self.api.Command['dnsconfig_show'](all=True, config = self.api.Command['dnsconfig_show'](all=True,
@@ -473,9 +479,9 @@ class update_dnsforward_emptyzones(DNSUpdater):
config.get('idnsforwardpolicy', [u'first'])[0] == u'first' config.get('idnsforwardpolicy', [u'first'])[0] == u'first'
and config.get('idnsforwarders', []) and config.get('idnsforwarders', [])
): ):
self.log.info('Global forward policy in LDAP for all servers will ' logger.info('Global forward policy in LDAP for all servers will '
'be changed to "only" to avoid conflicts with ' 'be changed to "only" to avoid conflicts with '
'automatic empty zones') 'automatic empty zones')
self.backup_zone(config) self.backup_zone(config)
self.api.Command['dnsconfig_mod'](idnsforwardpolicy=u'only') self.api.Command['dnsconfig_mod'](idnsforwardpolicy=u'only')
@@ -485,8 +491,8 @@ class update_dnsforward_emptyzones(DNSUpdater):
# forwardzones already use new semantics, no upgrade is required # forwardzones already use new semantics, no upgrade is required
return False, [] return False, []
self.log.debug('Updating forwarding policies in LDAP ' logger.debug('Updating forwarding policies in LDAP '
'to avoid conflicts with automatic empty zones') 'to avoid conflicts with automatic empty zones')
# update the DNSVersion, following upgrade can be executed only once # update the DNSVersion, following upgrade can be executed only once
self.api.Command['dnsconfig_mod'](ipadnsversion=2) self.api.Command['dnsconfig_mod'](ipadnsversion=2)
@@ -495,11 +501,11 @@ class update_dnsforward_emptyzones(DNSUpdater):
if dnsutil.has_empty_zone_addresses(self.api.env.host): if dnsutil.has_empty_zone_addresses(self.api.env.host):
self.update_global_ldap_forwarder() self.update_global_ldap_forwarder()
except dns.exception.DNSException as ex: except dns.exception.DNSException as ex:
self.log.error('Skipping update of global DNS forwarder in LDAP: ' logger.error('Skipping update of global DNS forwarder in LDAP: '
'Unable to determine if local server is using an ' 'Unable to determine if local server is using an '
'IP address belonging to an automatic empty zone. ' 'IP address belonging to an automatic empty zone. '
'Consider changing forwarding policy to "only". ' 'Consider changing forwarding policy to "only". '
'DNS exception: %s', ex) 'DNS exception: %s', ex)
return False, [] return False, []
@@ -513,33 +519,33 @@ class update_dnsserver_configuration_into_ldap(DNSUpdater):
def execute(self, **options): def execute(self, **options):
ldap = self.api.Backend.ldap2 ldap = self.api.Backend.ldap2
if sysupgrade.get_upgrade_state('dns', 'server_config_to_ldap'): if sysupgrade.get_upgrade_state('dns', 'server_config_to_ldap'):
self.log.debug('upgrade is not needed') logger.debug('upgrade is not needed')
return False, [] return False, []
dns_container_dn = DN(self.api.env.container_dns, self.api.env.basedn) dns_container_dn = DN(self.api.env.container_dns, self.api.env.basedn)
try: try:
ldap.get_entry(dns_container_dn) ldap.get_entry(dns_container_dn)
except errors.NotFound: except errors.NotFound:
self.log.debug('DNS container not found, nothing to upgrade') logger.debug('DNS container not found, nothing to upgrade')
sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True) sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True)
return False, [] return False, []
result = self.api.Command.server_show(self.api.env.host)['result'] result = self.api.Command.server_show(self.api.env.host)['result']
if not 'DNS server' in result.get('enabled_role_servrole', []): if not 'DNS server' in result.get('enabled_role_servrole', []):
self.log.debug('This server is not DNS server, nothing to upgrade') logger.debug('This server is not DNS server, nothing to upgrade')
sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True) sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True)
return False, [] return False, []
# create container first, if doesn't exist # create container first, if doesn't exist
ensure_dnsserver_container_exists(ldap, self.api, logger=self.log) ensure_dnsserver_container_exists(ldap, self.api)
try: try:
self.api.Command.dnsserver_add(self.api.env.host) self.api.Command.dnsserver_add(self.api.env.host)
except errors.DuplicateEntry: except errors.DuplicateEntry:
self.log.debug("DNS server configuration already exists " logger.debug("DNS server configuration already exists "
"in LDAP database") "in LDAP database")
else: else:
self.log.debug("DNS server configuration has been sucessfully " logger.debug("DNS server configuration has been sucessfully "
"created in LDAP database") "created in LDAP database")
sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True) sysupgrade.set_upgrade_state('dns', 'server_config_to_ldap', True)
return False, [] return False, []
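The error paths in this updater log a message and then the formatted traceback through the module logger. A compact sketch of that pattern; the function and its arguments are hypothetical:

import logging
import traceback

logger = logging.getLogger(__name__)


def transform_zone(zone_name, transform):
    try:
        transform(zone_name)
    except Exception:
        # Log the failure, then the traceback, as the updater above does.
        logger.error('Transform to forwardzone terminated: '
                     'creating forwardzone %s failed', zone_name)
        logger.error("%s", traceback.format_exc())
        # logger.exception(...) would attach the same traceback in one call.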

@@ -17,10 +17,14 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipaserver.install import replication from ipaserver.install import replication
from ipalib import Registry from ipalib import Registry
from ipalib import Updater from ipalib import Updater
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
EXCLUDE_TEMPLATE = '(objectclass=*) $ EXCLUDE %s' EXCLUDE_TEMPLATE = '(objectclass=*) $ EXCLUDE %s'
@@ -36,7 +40,7 @@ class update_replica_attribute_lists(Updater):
def execute(self, **options): def execute(self, **options):
# We need an LDAPClient connection to the backend # We need an LDAPClient connection to the backend
self.log.debug("Start replication agreement exclude list update task") logger.debug("Start replication agreement exclude list update task")
conn = self.api.Backend.ldap2 conn = self.api.Backend.ldap2
repl = replication.ReplicationManager(self.api.env.realm, repl = replication.ReplicationManager(self.api.env.realm,
@@ -46,11 +50,11 @@ class update_replica_attribute_lists(Updater):
# We need to update only IPA replica agreements, not winsync # We need to update only IPA replica agreements, not winsync
ipa_replicas = repl.find_ipa_replication_agreements() ipa_replicas = repl.find_ipa_replication_agreements()
self.log.debug("Found %d agreement(s)", len(ipa_replicas)) logger.debug("Found %d agreement(s)", len(ipa_replicas))
for replica in ipa_replicas: for replica in ipa_replicas:
for desc in replica.get('description', []): for desc in replica.get('description', []):
self.log.debug(desc) logger.debug('%s', desc)
self._update_attr(repl, replica, self._update_attr(repl, replica,
'nsDS5ReplicatedAttributeList', 'nsDS5ReplicatedAttributeList',
@@ -61,7 +65,7 @@ class update_replica_attribute_lists(Updater):
self._update_attr(repl, replica, self._update_attr(repl, replica,
'nsds5ReplicaStripAttrs', replication.STRIP_ATTRS) 'nsds5ReplicaStripAttrs', replication.STRIP_ATTRS)
self.log.debug("Done updating agreements") logger.debug("Done updating agreements")
return False, [] # No restart, no updates return False, [] # No restart, no updates
@@ -81,16 +85,16 @@ class update_replica_attribute_lists(Updater):
""" """
attrlist = replica.single_value.get(attribute) attrlist = replica.single_value.get(attribute)
if attrlist is None: if attrlist is None:
self.log.debug("Adding %s", attribute) logger.debug("Adding %s", attribute)
# Need to add it altogether # Need to add it altogether
replica[attribute] = [template % " ".join(values)] replica[attribute] = [template % " ".join(values)]
try: try:
repl.conn.update_entry(replica) repl.conn.update_entry(replica)
self.log.debug("Updated") logger.debug("Updated")
except Exception as e: except Exception as e:
self.log.error("Error caught updating replica: %s", str(e)) logger.error("Error caught updating replica: %s", str(e))
else: else:
attrlist_normalized = attrlist.lower().split() attrlist_normalized = attrlist.lower().split()
@@ -98,17 +102,17 @@ class update_replica_attribute_lists(Updater):
if a.lower() not in attrlist_normalized] if a.lower() not in attrlist_normalized]
if missing: if missing:
self.log.debug("%s needs updating (missing: %s)", attribute, logger.debug("%s needs updating (missing: %s)", attribute,
', '.join(missing)) ', '.join(missing))
replica[attribute] = [ replica[attribute] = [
'%s %s' % (attrlist, ' '.join(missing))] '%s %s' % (attrlist, ' '.join(missing))]
try: try:
repl.conn.update_entry(replica) repl.conn.update_entry(replica)
self.log.debug("Updated %s", attribute) logger.debug("Updated %s", attribute)
except Exception as e: except Exception as e:
self.log.error("Error caught updating %s: %s", logger.error("Error caught updating %s: %s",
attribute, str(e)) attribute, str(e))
else: else:
self.log.debug("%s: No update necessary" % attribute) logger.debug("%s: No update necessary", attribute)

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import six import six
from ipalib import Registry, errors from ipalib import Registry, errors
@@ -24,6 +26,8 @@ from ipalib import Updater
from ipapython import ipautil from ipapython import ipautil
from ipapython.dn import DN from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
if six.PY3: if six.PY3:
@@ -56,8 +60,8 @@ class GenerateUpdateMixin(object):
If the input DN doesn't end with old_suffix, log, an raise ValueError. If the input DN doesn't end with old_suffix, log, an raise ValueError.
""" """
if not dn.endswith(old_suffix): if not dn.endswith(old_suffix):
self.error("unable to replace suffix '%s' with '%s' in '%s'", logger.error("unable to replace suffix '%s' with '%s' in '%s'",
old_suffix, new_suffix, dn) old_suffix, new_suffix, dn)
raise ValueError('no replacement made') raise ValueError('no replacement made')
return DN(*dn[:-len(old_suffix)]) + new_suffix return DN(*dn[:-len(old_suffix)]) + new_suffix

@@ -2,6 +2,8 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license # Copyright (C) 2015 FreeIPA Contributors see COPYING for license
# #
import logging
from ipalib import errors from ipalib import errors
from ipalib import Registry from ipalib import Registry
from ipalib import Updater from ipalib import Updater
@@ -10,6 +12,8 @@ from ipaserver.install import cainstance
from ipaserver.install import ldapupdate from ipaserver.install import ldapupdate
from ipaplatform.paths import paths from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -23,7 +27,7 @@ class update_ca_topology(Updater):
ca = cainstance.CAInstance(self.api.env.realm) ca = cainstance.CAInstance(self.api.env.realm)
if not ca.is_configured(): if not ca.is_configured():
self.log.debug("CA is not configured on this host") logger.debug("CA is not configured on this host")
return False, [] return False, []
ld = ldapupdate.LDAPUpdate(ldapi=True, sub_dict={ ld = ldapupdate.LDAPUpdate(ldapi=True, sub_dict={

@@ -2,6 +2,7 @@
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license # Copyright (C) 2016 FreeIPA Contributors see COPYING for license
# #
import logging
import time import time
import ldap import ldap
@@ -10,6 +11,8 @@ from ipalib import errors
from ipalib import Updater from ipalib import Updater
from ipapython.dn import DN from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -35,23 +38,23 @@ class update_dna_shared_config(Updater):
if entry.single_value.get('nsslapd-pluginenabled') == 'off': if entry.single_value.get('nsslapd-pluginenabled') == 'off':
return False, () return False, ()
except errors.NotFound: except errors.NotFound:
self.log.error("Could not find DNA plugin entry: %s" % logger.error("Could not find DNA plugin entry: %s",
dna_config_base) dna_config_base)
return False, () return False, ()
try: try:
entry = conn.get_entry(dna_config_base) entry = conn.get_entry(dna_config_base)
except errors.NotFound: except errors.NotFound:
self.log.error("Could not find DNA config entry: %s" % logger.error("Could not find DNA config entry: %s",
dna_config_base) dna_config_base)
return False, () return False, ()
sharedcfgdn = entry.single_value.get("dnaSharedCfgDN") sharedcfgdn = entry.single_value.get("dnaSharedCfgDN")
if sharedcfgdn is not None: if sharedcfgdn is not None:
sharedcfgdn = DN(sharedcfgdn) sharedcfgdn = DN(sharedcfgdn)
else: else:
self.log.error( logger.error(
"Could not find DNA shared config DN in entry: %s" % "Could not find DNA shared config DN in entry: %s",
dna_config_base) dna_config_base)
return False, () return False, ()
@@ -80,25 +83,25 @@ class update_dna_shared_config(Updater):
) )
break break
except errors.NotFound: except errors.NotFound:
self.log.debug( logger.debug(
"Unable to find DNA shared config entry for " "Unable to find DNA shared config entry for "
"dnaHostname=%s (under %s) so far. Retry in 2 sec." % "dnaHostname=%s (under %s) so far. Retry in 2 sec.",
(fqdn, sharedcfgdn) fqdn, sharedcfgdn
) )
time.sleep(2) time.sleep(2)
else: else:
self.log.error( logger.error(
"Could not get dnaHostname entries in {} seconds".format( "Could not get dnaHostname entries in %s seconds",
max_wait * 2) max_wait * 2
) )
return False, () return False, ()
# If there are several entries, all of them will be updated # If there are several entries, all of them will be updated
# just log a debug msg. This is likely the result of #5510 # just log a debug msg. This is likely the result of #5510
if len(entries) != 1: if len(entries) != 1:
self.log.debug( logger.debug(
"%d entries dnaHostname=%s under %s. One expected" % "%d entries dnaHostname=%s under %s. One expected",
(len(entries), fqdn, sharedcfgdn) len(entries), fqdn, sharedcfgdn
) )
# time to set the bind method and the protocol in the # time to set the bind method and the protocol in the
@@ -117,9 +120,9 @@ class update_dna_shared_config(Updater):
try: try:
conn.update_entry(entry) conn.update_entry(entry)
except Exception as e: except Exception as e:
self.log.error( logger.error(
"Failed to set SASL/GSSAPI bind method/protocol " "Failed to set SASL/GSSAPI bind method/protocol "
"in entry {}: {}".format(entry, e) "in entry %s: %s", entry, e
) )
# no restart, no update # no restart, no update
return False, () return False, ()

@@ -83,6 +83,8 @@ No other keys are allowed in the template
The plugin also deletes permissions specified in OBSOLETE_PERMISSIONS. The plugin also deletes permissions specified in OBSOLETE_PERMISSIONS.
""" """
import logging
import six import six
from ipalib import api, errors from ipalib import api, errors
@@ -97,6 +99,8 @@ from ipaserver.plugins.permission import permission, permission_del
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
OBSOLETE_PERMISSIONS = { OBSOLETE_PERMISSIONS = {
@@ -327,7 +331,7 @@ class update_managed_permissions(Updater):
for acistr in acistrs: for acistr in acistrs:
if ACI(acistr).isequal(anonymous_read_aci): if ACI(acistr).isequal(anonymous_read_aci):
self.log.debug('Removing anonymous ACI: %s', acistr) logger.debug('Removing anonymous ACI: %s', acistr)
acistrs.remove(acistr) acistrs.remove(acistr)
break break
else: else:
@@ -356,18 +360,18 @@ class update_managed_permissions(Updater):
anonymous_read_aci = self.get_anonymous_read_aci(ldap) anonymous_read_aci = self.get_anonymous_read_aci(ldap)
if anonymous_read_aci: if anonymous_read_aci:
self.log.debug('Anonymous read ACI: %s', anonymous_read_aci) logger.debug('Anonymous read ACI: %s', anonymous_read_aci)
else: else:
self.log.debug('Anonymous ACI not found') logger.debug('Anonymous ACI not found')
current_obj = () # initially distinct from any obj value, even None current_obj = () # initially distinct from any obj value, even None
for name, template, obj in self.get_templates(): for name, template, obj in self.get_templates():
if current_obj != obj: if current_obj != obj:
if obj: if obj:
self.log.debug('Updating managed permissions for %s', logger.debug('Updating managed permissions for %s',
obj.name) obj.name)
else: else:
self.log.debug('Updating non-object managed permissions') logger.debug('Updating non-object managed permissions')
current_obj = obj current_obj = obj
self.update_permission(ldap, self.update_permission(ldap,
@@ -380,15 +384,15 @@ class update_managed_permissions(Updater):
self.remove_anonymous_read_aci(ldap, anonymous_read_aci) self.remove_anonymous_read_aci(ldap, anonymous_read_aci)
for obsolete_name in OBSOLETE_PERMISSIONS: for obsolete_name in OBSOLETE_PERMISSIONS:
self.log.debug('Deleting obsolete permission %s', obsolete_name) logger.debug('Deleting obsolete permission %s', obsolete_name)
try: try:
self.api.Command[permission_del](unicode(obsolete_name), self.api.Command[permission_del](unicode(obsolete_name),
force=True, force=True,
version=u'2.101') version=u'2.101')
except errors.NotFound: except errors.NotFound:
self.log.debug('Obsolete permission not found') logger.debug('Obsolete permission not found')
else: else:
self.log.debug('Obsolete permission deleted: %s', obsolete_name) logger.debug('Obsolete permission deleted: %s', obsolete_name)
return False, () return False, ()
@@ -429,7 +433,7 @@ class update_managed_permissions(Updater):
legacy_entry = ldap.get_entry(legacy_dn, legacy_entry = ldap.get_entry(legacy_dn,
['ipapermissiontype', 'cn']) ['ipapermissiontype', 'cn'])
except errors.NotFound: except errors.NotFound:
self.log.debug("Legacy permission %s not found", legacy_name) logger.debug("Legacy permission %s not found", legacy_name)
else: else:
if 'ipapermissiontype' not in legacy_entry: if 'ipapermissiontype' not in legacy_entry:
if is_new: if is_new:
@@ -440,47 +444,47 @@ class update_managed_permissions(Updater):
included, excluded = self.get_upgrade_attr_lists( included, excluded = self.get_upgrade_attr_lists(
acistr, legacy_acistrs) acistr, legacy_acistrs)
except IncompatibleACIModification: except IncompatibleACIModification:
self.log.error( logger.error(
"Permission '%s' has been modified from its " "Permission '%s' has been modified from its "
"default; not updating it to '%s'.", "default; not updating it to '%s'.",
legacy_name, name) legacy_name, name)
return return
else: else:
self.log.debug("Merging attributes from legacy " logger.debug("Merging attributes from legacy "
"permission '%s'", legacy_name) "permission '%s'", legacy_name)
self.log.debug("Included attrs: %s", logger.debug("Included attrs: %s",
', '.join(sorted(included))) ', '.join(sorted(included)))
self.log.debug("Excluded attrs: %s", logger.debug("Excluded attrs: %s",
', '.join(sorted(excluded))) ', '.join(sorted(excluded)))
entry['ipapermincludedattr'] = list(included) entry['ipapermincludedattr'] = list(included)
entry['ipapermexcludedattr'] = list(excluded) entry['ipapermexcludedattr'] = list(excluded)
remove_legacy = True remove_legacy = True
else: else:
self.log.debug("Ignoring attributes in legacy " logger.debug("Ignoring attributes in legacy "
"permission '%s' because '%s' exists", "permission '%s' because '%s' exists",
legacy_name, name) legacy_name, name)
remove_legacy = True remove_legacy = True
else: else:
self.log.debug("Ignoring V2 permission named '%s'" % logger.debug("Ignoring V2 permission named '%s'",
legacy_name) legacy_name)
update_aci = True update_aci = True
self.log.debug('Updating managed permission: %s', name) logger.debug('Updating managed permission: %s', name)
if is_new: if is_new:
ldap.add_entry(entry) ldap.add_entry(entry)
else: else:
try: try:
ldap.update_entry(entry) ldap.update_entry(entry)
except errors.EmptyModlist: except errors.EmptyModlist:
self.log.debug('No changes to permission: %s', name) logger.debug('No changes to permission: %s', name)
update_aci = False update_aci = False
if update_aci: if update_aci:
self.log.debug('Updating ACI for managed permission: %s', name) logger.debug('Updating ACI for managed permission: %s', name)
permission_plugin.update_aci(entry) permission_plugin.update_aci(entry)
if remove_legacy: if remove_legacy:
self.log.debug("Removing legacy permission '%s'", legacy_name) logger.debug("Removing legacy permission '%s'", legacy_name)
self.api.Command[permission_del](unicode(legacy_name)) self.api.Command[permission_del](unicode(legacy_name))
for name in template.get('replaces_system', ()): for name in template.get('replaces_system', ()):
@@ -489,14 +493,14 @@ class update_managed_permissions(Updater):
entry = ldap.get_entry(permission_plugin.get_dn(name), entry = ldap.get_entry(permission_plugin.get_dn(name),
['ipapermissiontype']) ['ipapermissiontype'])
except errors.NotFound: except errors.NotFound:
self.log.debug("Legacy permission '%s' not found", name) logger.debug("Legacy permission '%s' not found", name)
else: else:
flags = entry.get('ipapermissiontype', []) flags = entry.get('ipapermissiontype', [])
if list(flags) == ['SYSTEM']: if list(flags) == ['SYSTEM']:
self.log.debug("Removing legacy permission '%s'", name) logger.debug("Removing legacy permission '%s'", name)
self.api.Command[permission_del](name, force=True) self.api.Command[permission_del](name, force=True)
else: else:
self.log.debug("Ignoring V2 permission '%s'", name) logger.debug("Ignoring V2 permission '%s'", name)
def get_upgrade_attr_lists(self, current_acistring, default_acistrings): def get_upgrade_attr_lists(self, current_acistring, default_acistrings):
"""Compute included and excluded attributes for a new permission """Compute included and excluded attributes for a new permission
@@ -541,8 +545,8 @@ class update_managed_permissions(Updater):
current_aci = ACI(current_acistring) current_aci = ACI(current_acistring)
current_attrs = _pop_targetattr(current_aci) current_attrs = _pop_targetattr(current_aci)
self.log.debug("Current ACI for '%s': %s", logger.debug("Current ACI for '%s': %s",
current_aci.name, current_acistring) current_aci.name, current_acistring)
attrs_in_all_defaults = None attrs_in_all_defaults = None
attrs_in_any_defaults = set() attrs_in_any_defaults = set()
@@ -550,11 +554,11 @@ class update_managed_permissions(Updater):
for default_acistring in default_acistrings: for default_acistring in default_acistrings:
default_aci = ACI(default_acistring) default_aci = ACI(default_acistring)
default_attrs = _pop_targetattr(default_aci) default_attrs = _pop_targetattr(default_aci)
self.log.debug("Default ACI for '%s': %s", logger.debug("Default ACI for '%s': %s",
default_aci.name, default_acistring) default_aci.name, default_acistring)
if current_aci != default_aci: if current_aci != default_aci:
self.log.debug('ACIs not compatible') logger.debug('ACIs not compatible')
continue continue
else: else:
all_incompatible = False all_incompatible = False
@@ -566,7 +570,7 @@ class update_managed_permissions(Updater):
attrs_in_any_defaults |= default_attrs attrs_in_any_defaults |= default_attrs
if all_incompatible: if all_incompatible:
self.log.debug('All old default ACIs are incompatible') logger.debug('All old default ACIs are incompatible')
raise(IncompatibleACIModification()) raise(IncompatibleACIModification())
included = current_attrs - attrs_in_any_defaults included = current_attrs - attrs_in_any_defaults
@@ -659,8 +663,8 @@ class update_managed_permissions(Updater):
anonymous_read_aci.target['targetattr']['expression']) anonymous_read_aci.target['targetattr']['expression'])
read_blacklist &= attributes read_blacklist &= attributes
if read_blacklist: if read_blacklist:
self.log.debug('Excluded attributes for %s: %s', logger.debug('Excluded attributes for %s: %s',
name, ', '.join(read_blacklist)) name, ', '.join(read_blacklist))
entry['ipapermexcludedattr'] = list(read_blacklist) entry['ipapermexcludedattr'] = list(read_blacklist)
# Sanity check # Sanity check
@@ -700,7 +704,7 @@ class update_read_replication_agreements_permission(Updater):
try: try:
perm_entry = ldap.get_entry(old_perm_dn) perm_entry = ldap.get_entry(old_perm_dn)
except errors.NotFound: except errors.NotFound:
self.log.debug("Old permission not found") logger.debug("Old permission not found")
return False, () return False, ()
try: try:
@@ -709,9 +713,9 @@ class update_read_replication_agreements_permission(Updater):
# we can happily upgrade # we can happily upgrade
pass pass
else: else:
self.log.error("Permission '{}' cannot be upgraded. " logger.error("Permission '%s' cannot be upgraded. "
"Permission with target name '{}' already " "Permission with target name '%s' already "
"exists".format(old_perm_dn, new_perm_dn)) "exists", old_perm_dn, new_perm_dn)
return False, () return False, ()
# values are case insensitive # values are case insensitive
@@ -729,14 +733,14 @@ class update_read_replication_agreements_permission(Updater):
perm_entry['ipapermlocation'] = [] perm_entry['ipapermlocation'] = []
perm_entry['ipapermtargetfilter'] = [] perm_entry['ipapermtargetfilter'] = []
self.log.debug("Removing MANAGED attributes from permission %s", logger.debug("Removing MANAGED attributes from permission %s",
old_perm_dn) old_perm_dn)
try: try:
ldap.update_entry(perm_entry) ldap.update_entry(perm_entry)
except errors.EmptyModlist: except errors.EmptyModlist:
pass pass
# do modrdn on permission # do modrdn on permission
self.log.debug("modrdn: %s -> %s", old_perm_dn, new_perm_dn) logger.debug("modrdn: %s -> %s", old_perm_dn, new_perm_dn)
ldap.move_entry(old_perm_dn, new_perm_dn) ldap.move_entry(old_perm_dn, new_perm_dn)
return False, () return False, ()
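
All of the hunks above apply the same mechanical change: calls on the per-object `self.log` become calls on a module-level logger, with `%`-style placeholders and the values passed as separate arguments. A minimal sketch of that pattern, assuming an illustrative helper name (`move_permission` is not from this diff):

import logging

# One logger per module, named after the module via __name__.
logger = logging.getLogger(__name__)

def move_permission(old_perm_dn, new_perm_dn):
    # Arguments are passed separately and are only interpolated if the
    # DEBUG level is actually enabled for this logger.
    logger.debug("modrdn: %s -> %s", old_perm_dn, new_perm_dn)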

View File

@@ -2,6 +2,8 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license # Copyright (C) 2015 FreeIPA Contributors see COPYING for license
# #
import logging
from ipalib.plugable import Registry from ipalib.plugable import Registry
from ipalib import errors from ipalib import errors
from ipalib import Updater from ipalib import Updater
@@ -10,6 +12,8 @@ from ipapython.dn import DN
from ipaserver.install import sysupgrade from ipaserver.install import sysupgrade
from ipaserver.install.ldapupdate import LDAPUpdate from ipaserver.install.ldapupdate import LDAPUpdate
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -32,7 +36,7 @@ class update_nis_configuration(Updater):
# maps, we do not want to restore them again # maps, we do not want to restore them again
return return
self.log.debug("Recovering from missing NIS maps bug") logger.debug("Recovering from missing NIS maps bug")
suffix = "cn=NIS Server,cn=plugins,cn=config" suffix = "cn=NIS Server,cn=plugins,cn=config"
domain = self.api.env.domain domain = self.api.env.domain
@@ -71,7 +75,7 @@ class update_nis_configuration(Updater):
ldap.get_entry(dn, attrs_list=['cn']) ldap.get_entry(dn, attrs_list=['cn'])
except errors.NotFound: except errors.NotFound:
# NIS is not configured on system, do not execute update # NIS is not configured on system, do not execute update
self.log.debug("Skipping NIS update, NIS Server is not configured") logger.debug("Skipping NIS update, NIS Server is not configured")
# container does not exist, bug #5507 is not effective # container does not exist, bug #5507 is not effective
sysupgrade.set_upgrade_state( sysupgrade.set_upgrade_state(
@@ -79,7 +83,7 @@ class update_nis_configuration(Updater):
else: else:
self.__recover_from_missing_maps(ldap) self.__recover_from_missing_maps(ldap)
self.log.debug("Executing NIS Server update") logger.debug("Executing NIS Server update")
ld = LDAPUpdate(sub_dict={}, ldapi=True) ld = LDAPUpdate(sub_dict={}, ldapi=True)
ld.update([paths.NIS_UPDATE_ULDIF]) ld.update([paths.NIS_UPDATE_ULDIF])

View File

@@ -17,10 +17,14 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import Registry, errors from ipalib import Registry, errors
from ipalib import Updater from ipalib import Updater
from ipapython.dn import DN from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -38,18 +42,18 @@ class update_pacs(Updater):
entry = ldap.get_entry(dn, ['ipakrbauthzdata']) entry = ldap.get_entry(dn, ['ipakrbauthzdata'])
pacs = entry.get('ipakrbauthzdata', []) pacs = entry.get('ipakrbauthzdata', [])
except errors.NotFound: except errors.NotFound:
self.log.warning('Error retrieving: %s' % str(dn)) logger.warning('Error retrieving: %s', str(dn))
return False, [] return False, []
nfs_pac_set = any(pac.startswith('nfs:') for pac in pacs) nfs_pac_set = any(pac.startswith('nfs:') for pac in pacs)
if not nfs_pac_set: if not nfs_pac_set:
self.log.debug('Adding nfs:NONE to default PAC types') logger.debug('Adding nfs:NONE to default PAC types')
updated_pacs = pacs + [u'nfs:NONE'] updated_pacs = pacs + [u'nfs:NONE']
entry['ipakrbauthzdata'] = updated_pacs entry['ipakrbauthzdata'] = updated_pacs
ldap.update_entry(entry) ldap.update_entry(entry)
else: else:
self.log.debug('PAC for nfs is already set, not adding nfs:NONE.') logger.debug('PAC for nfs is already set, not adding nfs:NONE.')
return False, [] return False, []

View File

@@ -2,6 +2,7 @@
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license # Copyright (C) 2016 FreeIPA Contributors see COPYING for license
# #
import logging
import os import os
import tempfile import tempfile
@@ -12,6 +13,8 @@ from ipaplatform.paths import paths
from ipapython.certdb import NSSDatabase from ipapython.certdb import NSSDatabase
from ipaserver.install import cainstance from ipaserver.install import cainstance
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -35,11 +38,11 @@ class update_ra_cert_store(Updater):
elif os.path.exists(paths.RA_AGENT_PEM): elif os.path.exists(paths.RA_AGENT_PEM):
# even though the certificate file exists, we will overwrite it # even though the certificate file exists, we will overwrite it
# as it's probably something wrong anyway # as it's probably something wrong anyway
self.log.warning( logger.warning(
"A certificate with the nickname 'ipaCert' exists in " "A certificate with the nickname 'ipaCert' exists in "
"the old '{}' NSS database as well as in the new " "the old '%s' NSS database as well as in the new "
"PEM file '{}'" "PEM file '%s'",
.format(paths.HTTPD_ALIAS_DIR, paths.RA_AGENT_PEM)) paths.HTTPD_ALIAS_DIR, paths.RA_AGENT_PEM)
_fd, p12file = tempfile.mkstemp(dir=certdb.secdir) _fd, p12file = tempfile.mkstemp(dir=certdb.secdir)
# no password is necessary as we will be saving it in clear anyway # no password is necessary as we will be saving it in clear anyway
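
The warning above also shows how messages that used `str.format` placeholders are rewritten: `{}` becomes `%s` and the values move into the logging call, so interpolation only happens when the record is actually emitted. A hedged before/after sketch with stand-in path values:

import logging

logger = logging.getLogger(__name__)

old_db = "/etc/httpd/alias"              # stand-in values for illustration
new_pem = "/var/lib/ipa/ra-agent.pem"

# Eager formatting: the message is built even if WARNING is filtered out.
logger.warning("'ipaCert' exists in '{}' and in '{}'".format(old_db, new_pem))

# Lazy formatting: %s placeholders, values passed as logging arguments.
logger.warning("'ipaCert' exists in '%s' and in '%s'", old_db, new_pem)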

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib.install import certstore from ipalib.install import certstore
from ipaplatform.paths import paths from ipaplatform.paths import paths
from ipaserver.install import certs from ipaserver.install import certs
@@ -25,6 +27,8 @@ from ipalib import Updater
from ipapython import certdb from ipapython import certdb
from ipapython.dn import DN from ipapython.dn import DN
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -66,8 +70,8 @@ class update_upload_cacrt(Updater):
try: try:
certstore.init_ca_entry(entry, cert, nickname, trust, eku) certstore.init_ca_entry(entry, cert, nickname, trust, eku)
except Exception as e: except Exception as e:
self.log.warning("Failed to create entry for %s: %s", logger.warning("Failed to create entry for %s: %s",
nickname, e) nickname, e)
continue continue
if nickname == ca_nickname: if nickname == ca_nickname:
ca_cert = cert ca_cert = cert

View File

@@ -45,6 +45,8 @@ And then a nested response for each IPA command method sent in the request
""" """
import logging
import six import six
from ipalib import api, errors from ipalib import api, errors
@@ -60,6 +62,8 @@ from ipapython.version import API_VERSION
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@register() @register()
@@ -120,7 +124,7 @@ class batch(Command):
newkw.setdefault('version', options['version']) newkw.setdefault('version', options['version'])
result = api.Command[name](*a, **newkw) result = api.Command[name](*a, **newkw)
self.info( logger.info(
'%s: batch: %s(%s): SUCCESS', '%s: batch: %s(%s): SUCCESS',
getattr(context, 'principal', 'UNKNOWN'), getattr(context, 'principal', 'UNKNOWN'),
name, name,
@@ -130,13 +134,13 @@ class batch(Command):
except Exception as e: except Exception as e:
if isinstance(e, errors.RequirementError) or \ if isinstance(e, errors.RequirementError) or \
isinstance(e, errors.CommandError): isinstance(e, errors.CommandError):
self.info( logger.info(
'%s: batch: %s', '%s: batch: %s',
context.principal, # pylint: disable=no-member context.principal, # pylint: disable=no-member
e.__class__.__name__ e.__class__.__name__
) )
else: else:
self.info( logger.info(
'%s: batch: %s(%s): %s', '%s: batch: %s(%s): %s',
context.principal, name, # pylint: disable=no-member context.principal, name, # pylint: disable=no-member
', '.join(api.Command[name]._repr_iter(**params)), ', '.join(api.Command[name]._repr_iter(**params)),
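
The batch plugin used the plugin shortcut method `self.info`; such shortcuts map one-to-one onto the corresponding module-logger calls with the format string and arguments unchanged. A small sketch with stand-in values:

import logging

logger = logging.getLogger(__name__)

principal = "admin@EXAMPLE.COM"        # stand-in values for illustration
name, params = "user_show", "tuser"

# Before: self.info('%s: batch: %s(%s): SUCCESS', principal, name, params)
# After, same format string and arguments on the module logger:
logger.info('%s: batch: %s(%s): SUCCESS', principal, name, params)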

View File

@@ -22,6 +22,7 @@
import base64 import base64
import collections import collections
import datetime import datetime
import logging
from operator import attrgetter from operator import attrgetter
import os import os
@@ -148,6 +149,8 @@ http://www.ietf.org/rfc/rfc5280.txt
""") """)
logger = logging.getLogger(__name__)
USER, HOST, KRBTGT, SERVICE = range(4) USER, HOST, KRBTGT, SERVICE = range(4)
register = Registry() register = Registry()
@@ -921,8 +924,8 @@ class cert_request(Create, BaseCertMethod, VirtualCommand):
elif principal_type == USER: elif principal_type == USER:
api.Command['user_mod'](principal.username, **kwargs) api.Command['user_mod'](principal.username, **kwargs)
elif principal_type == KRBTGT: elif principal_type == KRBTGT:
self.log.error("Profiles used to store cert should't be " logger.error("Profiles used to store cert should't be "
"used for krbtgt certificates") "used for krbtgt certificates")
if 'certificate_chain' in ca_obj: if 'certificate_chain' in ca_obj:
cert = x509.load_certificate(result['certificate']) cert = x509.load_certificate(result['certificate'])
@@ -1194,7 +1197,8 @@ class cert_show(Retrieve, CertMethod, VirtualCommand):
try: try:
self.check_access() self.check_access()
except errors.ACIError as acierr: except errors.ACIError as acierr:
self.debug("Not granted by ACI to retrieve certificate, looking at principal") logger.debug("Not granted by ACI to retrieve certificate, "
"looking at principal")
if not bind_principal_can_manage_cert(cert): if not bind_principal_can_manage_cert(cert):
raise acierr # pylint: disable=E0702 raise acierr # pylint: disable=E0702
@@ -1264,7 +1268,8 @@ class cert_revoke(PKQuery, CertMethod, VirtualCommand):
try: try:
self.check_access() self.check_access()
except errors.ACIError as acierr: except errors.ACIError as acierr:
self.debug("Not granted by ACI to revoke certificate, looking at principal") logger.debug("Not granted by ACI to revoke certificate, "
"looking at principal")
try: try:
cert = x509.load_certificate(resp['result']['certificate']) cert = x509.load_certificate(resp['result']['certificate'])
if not bind_principal_can_manage_cert(cert): if not bind_principal_can_manage_cert(cert):

View File

@@ -18,6 +18,8 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64 import base64
import logging
import dbus import dbus
import six import six
@@ -44,6 +46,8 @@ from ipaserver.plugins.service import validate_certificate
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
__doc__ = _(""" __doc__ = _("""
Certificate Identity Mapping Certificate Identity Mapping
""") + _(""" """) + _("""
@@ -403,14 +407,14 @@ class _sssd(object):
""" """
Auxiliary class for SSSD infopipe DBus. Auxiliary class for SSSD infopipe DBus.
""" """
def __init__(self, log): def __init__(self):
""" """
Initialize the Users object and interface. Initialize the Users object and interface.
:raise RemoteRetrieveError: if DBus error occurs :raise RemoteRetrieveError: if DBus error occurs
""" """
try: try:
self.log = log self.log = logger
self._bus = dbus.SystemBus() self._bus = dbus.SystemBus()
self._users_obj = self._bus.get_object( self._users_obj = self._bus.get_object(
DBUS_SSSD_NAME, DBUS_SSSD_USERS_PATH) DBUS_SSSD_NAME, DBUS_SSSD_USERS_PATH)
@@ -535,7 +539,7 @@ class certmap_match(Search):
FreeIPA domain and trusted domains. FreeIPA domain and trusted domains.
:raise RemoteRetrieveError: if DBus returns an exception :raise RemoteRetrieveError: if DBus returns an exception
""" """
sssd = _sssd(self.log) sssd = _sssd()
cert = args[0] cert = args[0]
users = sssd.list_users_by_cert(cert) users = sssd.list_users_by_cert(cert)
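
With a module logger available, helper classes such as `_sssd` above no longer take a logger in their constructor, and call sites instantiate them without arguments. A simplified sketch (the DBus setup is omitted and the method body is a placeholder):

import logging

logger = logging.getLogger(__name__)

class _sssd(object):
    """Auxiliary class for SSSD InfoPipe DBus (simplified sketch)."""
    def __init__(self):
        # Kept so existing code that reads self.log keeps working.
        self.log = logger

    def list_users_by_cert(self, cert):
        logger.debug("looking up users for certificate %r", cert)
        return []  # placeholder; the real method queries SSSD over DBus

sssd = _sssd()                        # no logger argument any more
users = sssd.list_users_by_cert("<pem blob>")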

View File

@@ -20,6 +20,8 @@
from __future__ import absolute_import from __future__ import absolute_import
import logging
import netaddr import netaddr
import time import time
import re import re
@@ -311,6 +313,8 @@ server:
ipa dnsconfig-mod --forwarder=203.0.113.113 ipa dnsconfig-mod --forwarder=203.0.113.113
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
# supported resource record types # supported resource record types
@@ -1542,7 +1546,7 @@ def __dns_record_options_iter():
_dns_record_options = tuple(__dns_record_options_iter()) _dns_record_options = tuple(__dns_record_options_iter())
def check_ns_rec_resolvable(zone, name, log): def check_ns_rec_resolvable(zone, name):
assert isinstance(zone, DNSName) assert isinstance(zone, DNSName)
assert isinstance(name, DNSName) assert isinstance(name, DNSName)
@@ -2772,8 +2776,7 @@ class dnszone_add(DNSZoneBase_add):
# verify if user specified server is resolvable # verify if user specified server is resolvable
if not options['skip_nameserver_check']: if not options['skip_nameserver_check']:
check_ns_rec_resolvable(keys[0], entry_attrs['idnssoamname'], check_ns_rec_resolvable(keys[0], entry_attrs['idnssoamname'])
self.log)
# show warning about --name-server option # show warning about --name-server option
context.show_warning_nameserver_option = True context.show_warning_nameserver_option = True
else: else:
@@ -2871,7 +2874,7 @@ class dnszone_mod(DNSZoneBase_mod):
nameserver = entry_attrs['idnssoamname'] nameserver = entry_attrs['idnssoamname']
if nameserver: if nameserver:
if not nameserver.is_empty() and not options['force']: if not nameserver.is_empty() and not options['force']:
check_ns_rec_resolvable(keys[0], nameserver, self.log) check_ns_rec_resolvable(keys[0], nameserver)
context.show_warning_nameserver_option = True context.show_warning_nameserver_option = True
else: else:
# empty value, this option is required by ldap # empty value, this option is required by ldap
@@ -3044,7 +3047,7 @@ class dnsrecord(LDAPObject):
if options.get('force', False) or nsrecords is None: if options.get('force', False) or nsrecords is None:
return return
for nsrecord in nsrecords: for nsrecord in nsrecords:
check_ns_rec_resolvable(keys[0], DNSName(nsrecord), self.log) check_ns_rec_resolvable(keys[0], DNSName(nsrecord))
def _idnsname_pre_callback(self, ldap, dn, entry_attrs, *keys, **options): def _idnsname_pre_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN) assert isinstance(dn, DN)
@@ -3350,8 +3353,8 @@ class dnsrecord(LDAPObject):
ldap_rrsets[rdtype] = ldap_rrset ldap_rrsets[rdtype] = ldap_rrset
except dns.exception.SyntaxError as e: except dns.exception.SyntaxError as e:
self.log.error('DNS syntax error: %s %s %s: %s', dns_name, logger.error('DNS syntax error: %s %s %s: %s', dns_name,
dns.rdatatype.to_text(rdtype), value, e) dns.rdatatype.to_text(rdtype), value, e)
raise raise
return ldap_rrsets return ldap_rrsets
@@ -3376,14 +3379,14 @@ class dnsrecord(LDAPObject):
warn_attempts = max_attempts // 2 warn_attempts = max_attempts // 2
period = 1 # second period = 1 # second
attempt = 0 attempt = 0
log_fn = self.log.debug log_fn = logger.debug
log_fn('querying DNS server: expecting answer {%s}', ldap_rrset) log_fn('querying DNS server: expecting answer {%s}', ldap_rrset)
wait_template = 'waiting for DNS answer {%s}: got {%s} (attempt %s); '\ wait_template = 'waiting for DNS answer {%s}: got {%s} (attempt %s); '\
'waiting %s seconds before next try' 'waiting %s seconds before next try'
while attempt < max_attempts: while attempt < max_attempts:
if attempt >= warn_attempts: if attempt >= warn_attempts:
log_fn = self.log.warning log_fn = logger.warning
attempt += 1 attempt += 1
try: try:
dns_answer = resolver.query(dns_name, rdtype, dns_answer = resolver.query(dns_name, rdtype,
@@ -3453,14 +3456,14 @@ class dnsrecord(LDAPObject):
else: else:
e = errors.DNSDataMismatch(expected=ldap_rrset, e = errors.DNSDataMismatch(expected=ldap_rrset,
got="NXDOMAIN") got="NXDOMAIN")
self.log.error(e) logger.error('%s', e)
raise e raise e
except dns.resolver.NoNameservers as e: except dns.resolver.NoNameservers as e:
# Do not raise exception if we have got SERVFAILs. # Do not raise exception if we have got SERVFAILs.
# Maybe the user has created an invalid zone intentionally. # Maybe the user has created an invalid zone intentionally.
self.log.warning('waiting for DNS answer {%s}: got {%s}; ' logger.warning('waiting for DNS answer {%s}: got {%s}; '
'ignoring', ldap_rrset, type(e)) 'ignoring', ldap_rrset, type(e))
continue continue
except dns.exception.DNSException as e: except dns.exception.DNSException as e:
@@ -3469,7 +3472,7 @@ class dnsrecord(LDAPObject):
if err_str: if err_str:
err_desc += ": %s" % err_str err_desc += ": %s" % err_str
e = errors.DNSDataMismatch(expected=ldap_rrset, got=err_desc) e = errors.DNSDataMismatch(expected=ldap_rrset, got=err_desc)
self.log.error(e) logger.error('%s', e)
raise e raise e
def wait_for_modified_entries(self, entries): def wait_for_modified_entries(self, entries):
@@ -4211,7 +4214,7 @@ class dnsconfig_mod(LDAPUpdate):
# forwarders were changed # forwarders were changed
for forwarder in forwarders: for forwarder in forwarders:
try: try:
validate_dnssec_global_forwarder(forwarder, log=self.log) validate_dnssec_global_forwarder(forwarder)
except DNSSECSignatureMissingError as e: except DNSSECSignatureMissingError as e:
messages.add_message( messages.add_message(
options['version'], options['version'],
@@ -4286,8 +4289,7 @@ class dnsforwardzone(DNSZoneBase):
for forwarder in forwarders: for forwarder in forwarders:
try: try:
validate_dnssec_zone_forwarder_step1(forwarder, fwzone, validate_dnssec_zone_forwarder_step1(forwarder, fwzone)
log=self.log)
except UnresolvableRecordError as e: except UnresolvableRecordError as e:
messages.add_message( messages.add_message(
options['version'], options['version'],
@@ -4320,8 +4322,8 @@ class dnsforwardzone(DNSZoneBase):
if not ipa_dns_masters: if not ipa_dns_masters:
# something very bad happened, DNS is installed, but no IPA DNS # something very bad happened, DNS is installed, but no IPA DNS
# servers available # servers available
self.log.error("No IPA DNS server can be found, but integrated DNS " logger.error("No IPA DNS server can be found, but integrated DNS "
"is installed") "is installed")
return return
ipa_dns_ip = None ipa_dns_ip = None
@@ -4335,7 +4337,7 @@ class dnsforwardzone(DNSZoneBase):
break break
if not ipa_dns_ip: if not ipa_dns_ip:
self.log.error("Cannot resolve %s hostname", ipa_dns_masters[0]) logger.error("Cannot resolve %s hostname", ipa_dns_masters[0])
return return
# sleep a bit, adding new zone to BIND from LDAP may take a while # sleep a bit, adding new zone to BIND from LDAP may take a while
@@ -4344,8 +4346,7 @@ class dnsforwardzone(DNSZoneBase):
# Test if IPA is able to receive replies from forwarders # Test if IPA is able to receive replies from forwarders
try: try:
validate_dnssec_zone_forwarder_step2(ipa_dns_ip, fwzone, validate_dnssec_zone_forwarder_step2(ipa_dns_ip, fwzone)
log=self.log)
except DNSSECValidationError as e: except DNSSECValidationError as e:
messages.add_message( messages.add_message(
options['version'], options['version'],
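
The retry loop in this plugin also relies on bound logger methods being plain callables: it keeps `logger.debug` in a variable and swaps in `logger.warning` once the wait drags on. A compact sketch of that escalation pattern (attempt counts are illustrative):

import logging

logger = logging.getLogger(__name__)

def wait_for_answer(query, max_attempts=10):
    warn_attempts = max_attempts // 2
    log_fn = logger.debug                  # start quietly
    for attempt in range(1, max_attempts + 1):
        if attempt > warn_attempts:
            log_fn = logger.warning        # escalate once it takes too long
        log_fn("waiting for DNS answer {%s} (attempt %s)", query, attempt)
        # ... the real loop performs the DNS query and sleeps here ...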

View File

@@ -241,6 +241,8 @@ digits and nothing else follows.
import datetime import datetime
import json import json
import logging
from lxml import etree from lxml import etree
import time import time
import contextlib import contextlib
@@ -262,6 +264,8 @@ if api.env.in_server:
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
# These are general status return values used when # These are general status return values used when
# CMSServlet.outputError() is invoked. # CMSServlet.outputError() is invoked.
CMS_SUCCESS = 0 CMS_SUCCESS = 0
@@ -1387,7 +1391,7 @@ class ra(rabase.rabase, RestClient):
if detail is not None: if detail is not None:
err_msg = u'%s (%s)' % (err_msg, detail) err_msg = u'%s (%s)' % (err_msg, detail)
self.error('%s.%s(): %s', type(self).__name__, func_name, err_msg) logger.error('%s.%s(): %s', type(self).__name__, func_name, err_msg)
raise errors.CertificateOperationError(error=err_msg) raise errors.CertificateOperationError(error=err_msg)
def _request(self, url, port, **kw): def _request(self, url, port, **kw):
@@ -1434,7 +1438,7 @@ class ra(rabase.rabase, RestClient):
self.raise_certificate_operation_error('get_parse_result_xml', self.raise_certificate_operation_error('get_parse_result_xml',
detail=str(e)) detail=str(e))
result = parse_func(doc) result = parse_func(doc)
self.debug( logger.debug(
"%s() xml_text:\n%r\nparse_result:\n%r", "%s() xml_text:\n%r\nparse_result:\n%r",
parse_func.__name__, xml_text, result) parse_func.__name__, xml_text, result)
return result return result
@@ -1473,7 +1477,7 @@ class ra(rabase.rabase, RestClient):
""" """
self.debug('%s.check_request_status()', type(self).__name__) logger.debug('%s.check_request_status()', type(self).__name__)
# Call CMS # Call CMS
http_status, _http_headers, http_body = ( http_status, _http_headers, http_body = (
@@ -1554,7 +1558,7 @@ class ra(rabase.rabase, RestClient):
""" """
self.debug('%s.get_certificate()', type(self).__name__) logger.debug('%s.get_certificate()', type(self).__name__)
# Convert serial number to integral type from string to properly handle # Convert serial number to integral type from string to properly handle
# radix issues. Note: the int object constructor will properly handle large # radix issues. Note: the int object constructor will properly handle large
@@ -1621,7 +1625,7 @@ class ra(rabase.rabase, RestClient):
``unicode``, decimal representation ``unicode``, decimal representation
""" """
self.debug('%s.request_certificate()', type(self).__name__) logger.debug('%s.request_certificate()', type(self).__name__)
# Call CMS # Call CMS
template = u'''<?xml version="1.0" encoding="UTF-8" standalone="yes"?> template = u'''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
@@ -1711,7 +1715,7 @@ class ra(rabase.rabase, RestClient):
+---------------+---------------+---------------+ +---------------+---------------+---------------+
""" """
self.debug('%s.revoke_certificate()', type(self).__name__) logger.debug('%s.revoke_certificate()', type(self).__name__)
if type(revocation_reason) is not int: if type(revocation_reason) is not int:
raise TypeError(TYPE_ERROR % ('revocation_reason', int, revocation_reason, type(revocation_reason))) raise TypeError(TYPE_ERROR % ('revocation_reason', int, revocation_reason, type(revocation_reason)))
@@ -1773,7 +1777,7 @@ class ra(rabase.rabase, RestClient):
+---------------+---------------+---------------+ +---------------+---------------+---------------+
""" """
self.debug('%s.take_certificate_off_hold()', type(self).__name__) logger.debug('%s.take_certificate_off_hold()', type(self).__name__)
# Convert serial number to integral type from string to properly handle # Convert serial number to integral type from string to properly handle
# radix issues. Note: the int object constructor will properly handle large # radix issues. Note: the int object constructor will properly handle large
@@ -1825,7 +1829,7 @@ class ra(rabase.rabase, RestClient):
ts = time.strptime(value, '%Y-%m-%d') ts = time.strptime(value, '%Y-%m-%d')
return int(time.mktime(ts) * 1000) return int(time.mktime(ts) * 1000)
self.debug('%s.find()', type(self).__name__) logger.debug('%s.find()', type(self).__name__)
# Create the root element # Create the root element
page = etree.Element('CertSearchRequest') page = etree.Element('CertSearchRequest')
@@ -1901,7 +1905,7 @@ class ra(rabase.rabase, RestClient):
e.text = str(booloptions[opt]).lower() e.text = str(booloptions[opt]).lower()
payload = etree.tostring(doc, pretty_print=False, xml_declaration=True, encoding='UTF-8') payload = etree.tostring(doc, pretty_print=False, xml_declaration=True, encoding='UTF-8')
self.debug('%s.find(): request: %s', type(self).__name__, payload) logger.debug('%s.find(): request: %s', type(self).__name__, payload)
url = 'http://%s/ca/rest/certs/search?size=%d' % ( url = 'http://%s/ca/rest/certs/search?size=%d' % (
ipautil.format_netloc(self.ca_host, 80), ipautil.format_netloc(self.ca_host, 80),
@@ -1915,7 +1919,7 @@ class ra(rabase.rabase, RestClient):
try: try:
response = opener.open(req) response = opener.open(req)
except urllib.error.HTTPError as e: except urllib.error.HTTPError as e:
self.debug('HTTP Response code: %d' % e.getcode()) logger.debug('HTTP Response code: %d', e.getcode())
if e.getcode() == 501: if e.getcode() == 501:
self.raise_certificate_operation_error('find', self.raise_certificate_operation_error('find',
detail=_('find not supported on CAs upgraded from 9 to 10')) detail=_('find not supported on CAs upgraded from 9 to 10'))
@@ -1926,7 +1930,7 @@ class ra(rabase.rabase, RestClient):
detail=e.reason) detail=e.reason)
data = response.readlines() data = response.readlines()
self.debug('%s.find(): response: %s', type(self).__name__, data) logger.debug('%s.find(): response: %s', type(self).__name__, data)
parser = etree.XMLParser() parser = etree.XMLParser()
try: try:
doc = etree.fromstring(data[0], parser) doc = etree.fromstring(data[0], parser)
@@ -2128,7 +2132,7 @@ class ra_lightweight_ca(RestClient):
try: try:
return json.loads(ipautil.decode_json(resp_body)) return json.loads(ipautil.decode_json(resp_body))
except Exception as e: except Exception as e:
self.log.debug(e, exc_info=True) logger.debug('%s', e, exc_info=True)
raise errors.RemoteRetrieveError( raise errors.RemoteRetrieveError(
reason=_("Response from CA was not valid JSON")) reason=_("Response from CA was not valid JSON"))
@@ -2138,7 +2142,7 @@ class ra_lightweight_ca(RestClient):
try: try:
return json.loads(ipautil.decode_json(resp_body)) return json.loads(ipautil.decode_json(resp_body))
except Exception as e: except Exception as e:
self.log.debug(e, exc_info=True) logger.debug('%s', e, exc_info=True)
raise errors.RemoteRetrieveError( raise errors.RemoteRetrieveError(
reason=_("Response from CA was not valid JSON")) reason=_("Response from CA was not valid JSON"))

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import api, errors, output, util from ipalib import api, errors, output, util
from ipalib import Command, Str, Flag, Int from ipalib import Command, Str, Flag, Int
from ipalib import _ from ipalib import _
@@ -213,6 +215,8 @@ EXAMPLES:
Not matched rules: can_login Not matched rules: can_login
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
def _convert_to_ipa_rule(rule): def _convert_to_ipa_rule(rule):
@@ -479,10 +483,11 @@ class hbactest(Command):
code, rule_name = e.args code, rule_name = e.args
if code == pyhbac.HBAC_EVAL_ERROR: if code == pyhbac.HBAC_EVAL_ERROR:
error_rules.append(rule_name) error_rules.append(rule_name)
self.log.info('Native IPA HBAC rule "%s" parsing error: %s' % \ logger.info('Native IPA HBAC rule "%s" parsing error: '
(rule_name, pyhbac.hbac_result_string(code))) '%s',
rule_name, pyhbac.hbac_result_string(code))
except (TypeError, IOError) as info: except (TypeError, IOError) as info:
self.log.error('Native IPA HBAC module error: %s' % info) logger.error('Native IPA HBAC module error: %s', info)
access_granted = len(matched_rules) > 0 access_granted = len(matched_rules) > 0
else: else:

View File

@@ -20,6 +20,8 @@
from __future__ import absolute_import from __future__ import absolute_import
import logging
import dns.resolver import dns.resolver
import six import six
@@ -133,6 +135,8 @@ EXAMPLES:
ipa host-allow-create-keytab test2 --users=tuser1 ipa host-allow-create-keytab test2 --users=tuser1
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -1073,7 +1077,7 @@ class host_find(LDAPSearch):
reason=e, reason=e,
) )
) )
self.log.error("Invalid certificate: {err}".format(err=e)) logger.error("Invalid certificate: %s", e)
del(entry_attrs['usercertificate']) del(entry_attrs['usercertificate'])
set_kerberos_attrs(entry_attrs, options) set_kerberos_attrs(entry_attrs, options)

View File

@@ -21,6 +21,8 @@
Joining an IPA domain Joining an IPA domain
""" """
import logging
import six import six
from ipalib import Registry, api from ipalib import Registry, api
@@ -32,6 +34,8 @@ from ipaserver.install import installutils
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -100,16 +104,16 @@ class join(Command):
dn = attrs_list['dn'] dn = attrs_list['dn']
# No error raised so far means that host entry exists # No error raised so far means that host entry exists
self.log.info('Host entry for %s already exists, ' logger.info('Host entry for %s already exists, '
'joining may fail on the client side ' 'joining may fail on the client side '
'if not forced', hostname) 'if not forced', hostname)
# If no principal name is set yet we need to try to add # If no principal name is set yet we need to try to add
# one. # one.
if 'krbprincipalname' not in attrs_list: if 'krbprincipalname' not in attrs_list:
service = "host/%s@%s" % (hostname, api.env.realm) service = "host/%s@%s" % (hostname, api.env.realm)
api.Command['host_mod'](hostname, krbprincipalname=service) api.Command['host_mod'](hostname, krbprincipalname=service)
self.log.info('No principal set, setting to %s', service) logger.info('No principal set, setting to %s', service)
# It exists, can we write the password attributes? # It exists, can we write the password attributes?
allowed = ldap.can_write(dn, 'krblastpwdchange') allowed = ldap.can_write(dn, 'krblastpwdchange')

View File

@@ -27,6 +27,7 @@ Backend plugin for LDAP.
# binding encodes them into the appropriate representation. This applies to # binding encodes them into the appropriate representation. This applies to
# everything except the CrudBackend methods, where dn is part of the entry dict. # everything except the CrudBackend methods, where dn is part of the entry dict.
import logging
import os import os
import ldap as _ldap import ldap as _ldap
@@ -56,6 +57,8 @@ from ipalib import Registry, errors, _
from ipalib.crud import CrudBackend from ipalib.crud import CrudBackend
from ipalib.request import context from ipalib.request import context
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
_missing = object() _missing = object()
@@ -402,7 +405,7 @@ class ldap2(CrudBackend, LDAPClient):
assert isinstance(dn, DN) assert isinstance(dn, DN)
assert isinstance(group_dn, DN) assert isinstance(group_dn, DN)
self.log.debug( logger.debug(
"add_entry_to_group: dn=%s group_dn=%s member_attr=%s", "add_entry_to_group: dn=%s group_dn=%s member_attr=%s",
dn, group_dn, member_attr) dn, group_dn, member_attr)
@@ -432,7 +435,7 @@ class ldap2(CrudBackend, LDAPClient):
assert isinstance(dn, DN) assert isinstance(dn, DN)
assert isinstance(group_dn, DN) assert isinstance(group_dn, DN)
self.log.debug( logger.debug(
"remove_entry_from_group: dn=%s group_dn=%s member_attr=%s", "remove_entry_from_group: dn=%s group_dn=%s member_attr=%s",
dn, group_dn, member_attr) dn, group_dn, member_attr)

View File

@@ -17,6 +17,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import re import re
from ldap import MOD_ADD from ldap import MOD_ADD
from ldap import SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE from ldap import SCOPE_BASE, SCOPE_ONELEVEL, SCOPE_SUBTREE
@@ -132,6 +133,8 @@ for each user added plus a summary when the default user group is
updated. updated.
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
# USER MIGRATION CALLBACKS AND VARS # USER MIGRATION CALLBACKS AND VARS
@@ -758,10 +761,9 @@ migration process might be incomplete\n''')
truncated = False truncated = False
entries = [] entries = []
if truncated: if truncated:
self.log.error( logger.error(
'%s: %s' % ( '%s: %s',
ldap_obj.name, self.truncated_err_msg ldap_obj.name, self.truncated_err_msg
)
) )
blacklists = {} blacklists = {}

View File

@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import six import six
from ipalib import api, errors, krb_utils from ipalib import api, errors, krb_utils
@@ -54,6 +56,8 @@ EXAMPLES:
ipa passwd tuser1 ipa passwd tuser1
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
# We only need to prompt for the current password when changing a password # We only need to prompt for the current password when changing a password
@@ -134,7 +138,8 @@ class passwd(Command):
if principal == getattr(context, 'principal') and \ if principal == getattr(context, 'principal') and \
current_password == MAGIC_VALUE: current_password == MAGIC_VALUE:
# No cheating # No cheating
self.log.warning('User attempted to change password using magic value') logger.warning('User attempted to change password using magic '
'value')
raise errors.ACIError(info=_('Invalid credentials')) raise errors.ACIError(info=_('Invalid credentials'))
if current_password == MAGIC_VALUE: if current_password == MAGIC_VALUE:

View File

@@ -17,6 +17,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import re import re
import traceback import traceback
@@ -103,6 +104,8 @@ EXAMPLES:
ipa permission-add --attrs=member --permissions=write --type=group "Manage Group Members" ipa permission-add --attrs=member --permissions=write --type=group "Manage Group Members"
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
_DEPRECATED_OPTION_ALIASES = { _DEPRECATED_OPTION_ALIASES = {
@@ -610,7 +613,7 @@ class permission(baseldap.LDAPObject):
location = permission_entry.single_value.get('ipapermlocation', location = permission_entry.single_value.get('ipapermlocation',
self.api.env.basedn) self.api.env.basedn)
self.log.debug('Adding ACI %r to %s' % (acistring, location)) logger.debug('Adding ACI %r to %s', acistring, location)
try: try:
entry = ldap.get_entry(location, ['aci']) entry = ldap.get_entry(location, ['aci'])
except errors.NotFound: except errors.NotFound:
@@ -654,15 +657,15 @@ class permission(baseldap.LDAPObject):
acidn = acientry.dn # pylint: disable=E1103 acidn = acientry.dn # pylint: disable=E1103
if acistring is not None: if acistring is not None:
self.log.debug('Removing ACI %r from %s' % (acistring, acidn)) logger.debug('Removing ACI %r from %s', acistring, acidn)
acientry['aci'].remove(acistring) acientry['aci'].remove(acistring)
if new_acistring: if new_acistring:
self.log.debug('Adding ACI %r to %s' % (new_acistring, acidn)) logger.debug('Adding ACI %r to %s', new_acistring, acidn)
acientry.setdefault('aci', []).append(new_acistring) acientry.setdefault('aci', []).append(new_acistring)
try: try:
ldap.update_entry(acientry) ldap.update_entry(acientry)
except errors.EmptyModlist: except errors.EmptyModlist:
self.log.debug('No changes to ACI') logger.debug('No changes to ACI')
return acientry, acistring return acientry, acistring
def _get_aci_entry_and_string(self, permission_entry, name=None, def _get_aci_entry_and_string(self, permission_entry, name=None,
@@ -696,8 +699,8 @@ class permission(baseldap.LDAPObject):
try: try:
aci = ACI(acistring) aci = ACI(acistring)
except SyntaxError as e: except SyntaxError as e:
self.log.warning('Unparseable ACI %s: %s (at %s)', logger.warning('Unparseable ACI %s: %s (at %s)',
acistring, e, location) acistring, e, location)
continue continue
if aci.name == wanted_aciname: if aci.name == wanted_aciname:
return acientry, acistring return acientry, acistring
@@ -1194,7 +1197,7 @@ class permission_mod(baseldap.LDAPUpdate):
try: try:
context.old_aci_info = self.obj.remove_aci(old_entry) context.old_aci_info = self.obj.remove_aci(old_entry)
except errors.NotFound as e: except errors.NotFound as e:
self.log.error('permission ACI not found: %s' % e) logger.error('permission ACI not found: %s', e)
# To pass data to postcallback, we currently need to use the context # To pass data to postcallback, we currently need to use the context
context.old_entry = old_entry context.old_entry = old_entry
@@ -1212,8 +1215,8 @@ class permission_mod(baseldap.LDAPUpdate):
# Try to roll back the old ACI # Try to roll back the old ACI
entry, old_aci_string = old_aci_info entry, old_aci_string = old_aci_info
if old_aci_string: if old_aci_string:
self.log.warning('Reverting ACI on %s to %s' % (entry.dn, logger.warning('Reverting ACI on %s to %s', entry.dn,
old_aci_string)) old_aci_string)
entry['aci'].append(old_aci_string) entry['aci'].append(old_aci_string)
self.Backend.ldap2.update_entry(entry) self.Backend.ldap2.update_entry(entry)
@@ -1229,8 +1232,8 @@ class permission_mod(baseldap.LDAPUpdate):
# Don't revert attribute which doesn't exist in LDAP # Don't revert attribute which doesn't exist in LDAP
entry.pop('attributelevelrights', None) entry.pop('attributelevelrights', None)
self.log.error('Error updating ACI: %s' % traceback.format_exc()) logger.error('Error updating ACI: %s', traceback.format_exc())
self.log.warning('Reverting entry') logger.warning('Reverting entry')
old_entry.reset_modlist(entry) old_entry.reset_modlist(entry)
ldap.update_entry(old_entry) ldap.update_entry(old_entry)
self._revert_aci() self._revert_aci()
@@ -1326,7 +1329,7 @@ class permission_find(baseldap.LDAPSearch):
root_entry = ldap.get_entry(DN(api.env.basedn), ['aci']) root_entry = ldap.get_entry(DN(api.env.basedn), ['aci'])
except errors.NotFound: except errors.NotFound:
legacy_entries = () legacy_entries = ()
self.log.debug('potential legacy entries: %s', len(legacy_entries)) logger.debug('potential legacy entries: %s', len(legacy_entries))
nonlegacy_names = {e.single_value['cn'] for e in entries} nonlegacy_names = {e.single_value['cn'] for e in entries}
for entry in legacy_entries: for entry in legacy_entries:
if entry.single_value['cn'] in nonlegacy_names: if entry.single_value['cn'] in nonlegacy_names:

View File

@@ -18,6 +18,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from ipalib import api from ipalib import api
from ipalib import Int, Str, DNParam from ipalib import Int, Str, DNParam
from ipalib import errors from ipalib import errors
@@ -81,6 +83,8 @@ EXAMPLES:
ipa pwpolicy-mod --minclasses=2 localadmins ipa pwpolicy-mod --minclasses=2 localadmins
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@register() @register()
@@ -435,7 +439,7 @@ class pwpolicy_add(LDAPCreate):
def post_callback(self, ldap, dn, entry_attrs, *keys, **options): def post_callback(self, ldap, dn, entry_attrs, *keys, **options):
assert isinstance(dn, DN) assert isinstance(dn, DN)
self.log.info('%r' % entry_attrs) logger.info('%r', entry_attrs)
# attribute rights are not allowed for pwpolicy_add # attribute rights are not allowed for pwpolicy_add
self.obj.add_cospriority(entry_attrs, keys[-1], rights=False) self.obj.add_cospriority(entry_attrs, keys[-1], rights=False)
self.obj.convert_time_for_output(entry_attrs, **options) self.obj.convert_time_for_output(entry_attrs, **options)

View File

@@ -2,6 +2,8 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license # Copyright (C) 2015 FreeIPA Contributors see COPYING for license
# #
import logging
import dbus import dbus
import dbus.mainloop.glib import dbus.mainloop.glib
import ldap import ldap
@@ -41,6 +43,8 @@ EXAMPLES:
ipa server-show ipa.example.com ipa server-show ipa.example.com
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -575,9 +579,9 @@ class server_del(LDAPDelete):
mod = [(ldap.MOD_DELETE, 'memberPrincipal', member_principal)] mod = [(ldap.MOD_DELETE, 'memberPrincipal', member_principal)]
conn.conn.modify_s(str(dn), mod) conn.conn.modify_s(str(dn), mod)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE): except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
self.log.debug( logger.debug(
"Replica (%s) memberPrincipal (%s) not found in %s" % "Replica (%s) memberPrincipal (%s) not found in %s",
(master, member_principal, dn)) master, member_principal, dn)
except Exception as e: except Exception as e:
self.add_message( self.add_message(
messages.ServerRemovalWarning( messages.ServerRemovalWarning(
@@ -803,11 +807,11 @@ class server_del(LDAPDelete):
return return
time.sleep(2) time.sleep(2)
if i == 2: # taking too long, something is wrong, report if i == 2: # taking too long, something is wrong, report
self.log.info( logger.info(
"Waiting for removal of replication agreements") "Waiting for removal of replication agreements")
if i > 90: if i > 90:
self.log.info("Taking too long, skipping") logger.info("Taking too long, skipping")
self.log.info("Following segments were not deleted:") logger.info("Following segments were not deleted:")
self.add_message(messages.ServerRemovalWarning( self.add_message(messages.ServerRemovalWarning(
message=_("Following segments were not deleted:"))) message=_("Following segments were not deleted:")))
for s in left: for s in left:
@@ -833,14 +837,14 @@ class server_del(LDAPDelete):
# If the server was already deleted, we can expect that all # If the server was already deleted, we can expect that all
# removals had been done in previous run and dangling segments # removals had been done in previous run and dangling segments
# were not deleted. # were not deleted.
self.log.info( logger.info(
"Skipping replication agreement deletion check for " "Skipping replication agreement deletion check for "
"suffix '{0}'".format(suffix_name)) "suffix '%s'", suffix_name)
continue continue
self.log.info( logger.info(
"Checking for deleted segments in suffix '{0}'".format( "Checking for deleted segments in suffix '%s",
suffix_name)) suffix_name)
wait_for_segment_removal( wait_for_segment_removal(
hostname, hostname,

View File

@@ -19,6 +19,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives import hashes
import six import six
@@ -116,6 +118,8 @@ EXAMPLES:
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
output_params = ( output_params = (
@@ -791,7 +795,7 @@ class service_find(LDAPSearch):
reason=e reason=e
) )
) )
self.log.error("Invalid certificate: {err}".format(err=e)) logger.error("Invalid certificate: %s", e)
del(entry_attrs['usercertificate']) del(entry_attrs['usercertificate'])
set_kerberos_attrs(entry_attrs, options) set_kerberos_attrs(entry_attrs, options)
@@ -829,7 +833,7 @@ class service_show(LDAPRetrieve):
reason=e, reason=e,
) )
) )
self.log.error("Invalid certificate: {err}".format(err=e)) logger.error("Invalid certificate: %s", e)
del(entry_attrs['usercertificate']) del(entry_attrs['usercertificate'])
set_kerberos_attrs(entry_attrs, options) set_kerberos_attrs(entry_attrs, options)

View File

@@ -2,10 +2,14 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license # Copyright (C) 2015 FreeIPA Contributors see COPYING for license
# #
import logging
from ipalib import Command from ipalib import Command
from ipalib.request import context from ipalib.request import context
from ipalib.plugable import Registry from ipalib.plugable import Registry
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -19,7 +23,7 @@ class session_logout(Command):
def execute(self, *args, **options): def execute(self, *args, **options):
ccache_name = getattr(context, 'ccache_name', None) ccache_name = getattr(context, 'ccache_name', None)
if ccache_name is None: if ccache_name is None:
self.debug('session logout command: no ccache_name found') logger.debug('session logout command: no ccache_name found')
else: else:
delattr(context, 'ccache_name') delattr(context, 'ccache_name')

View File

@@ -17,6 +17,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import posixpath import posixpath
from copy import deepcopy from copy import deepcopy
@@ -101,6 +102,8 @@ EXAMPLES:
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -449,7 +452,8 @@ class stageuser_find(baseuser_find):
# but then they have to create inetOrgPerson stage user # but then they have to create inetOrgPerson stage user
stagefilter = filter.replace(container_filter, stagefilter = filter.replace(container_filter,
"(|%s(objectclass=inetOrgPerson))" % container_filter) "(|%s(objectclass=inetOrgPerson))" % container_filter)
self.log.debug("stageuser_find: pre_callback new filter=%s " % (stagefilter)) logger.debug("stageuser_find: pre_callback new filter=%s ",
stagefilter)
return (stagefilter, base_dn, scope) return (stagefilter, base_dn, scope)
def post_callback(self, ldap, entries, truncated, *args, **options): def post_callback(self, ldap, entries, truncated, *args, **options):
@@ -583,9 +587,10 @@ class stageuser_activate(LDAPQuery):
(isinstance(v, unicode) and v in (u'', None)): (isinstance(v, unicode) and v in (u'', None)):
try: try:
v.decode('utf-8') v.decode('utf-8')
self.log.debug("merge: %s:%r wiped" % (attr, v)) logger.debug("merge: %s:%r wiped", attr, v)
except Exception: except Exception:
self.log.debug("merge %s: [no_print %s]" % (attr, v.__class__.__name__)) logger.debug("merge %s: [no_print %s]",
attr, v.__class__.__name__)
if isinstance(entry_to[attr], (list, tuple)): if isinstance(entry_to[attr], (list, tuple)):
# multi value attribute # multi value attribute
if v not in entry_to[attr]: if v not in entry_to[attr]:
@@ -599,9 +604,10 @@ class stageuser_activate(LDAPQuery):
else: else:
try: try:
v.decode('utf-8') v.decode('utf-8')
self.log.debug("Add: %s:%r" % (attr, v)) logger.debug("Add: %s:%r", attr, v)
except Exception: except Exception:
self.log.debug("Add %s: [no_print %s]" % (attr, v.__class__.__name__)) logger.debug("Add %s: [no_print %s]",
attr, v.__class__.__name__)
if isinstance(entry_to[attr], (list, tuple)): if isinstance(entry_to[attr], (list, tuple)):
# multi value attribute # multi value attribute
@@ -697,7 +703,7 @@ class stageuser_activate(LDAPQuery):
del result_entry['description'] del result_entry['description']
for (k, v) in new_entry_attrs.items(): for (k, v) in new_entry_attrs.items():
self.log.debug("new entry: k=%r and v=%r)" % (k, v)) logger.debug("new entry: k=%r and v=%r)", k, v)
self._build_new_entry(ldap, staging_dn, entry_attrs, new_entry_attrs) self._build_new_entry(ldap, staging_dn, entry_attrs, new_entry_attrs)
@@ -710,10 +716,12 @@ class stageuser_activate(LDAPQuery):
self._exc_wrapper(args, options, ldap.delete_entry)(staging_dn) self._exc_wrapper(args, options, ldap.delete_entry)(staging_dn)
except: except:
try: try:
self.log.error("Fail to delete the Staging user after activating it %s " % (staging_dn)) logger.error("Fail to delete the Staging user after "
"activating it %s ", staging_dn)
self._exc_wrapper(args, options, ldap.delete_entry)(active_dn) self._exc_wrapper(args, options, ldap.delete_entry)(active_dn)
except Exception: except Exception:
self.log.error("Fail to cleanup activation. The user remains active %s" % (active_dn)) logger.error("Fail to cleanup activation. The user remains "
"active %s", active_dn)
raise raise
# add the user we just created into the default primary group # add the user we just created into the default primary group

View File

@@ -18,6 +18,8 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import six import six
from ipalib.messages import ( from ipalib.messages import (
@@ -155,6 +157,8 @@ particular type.
ipa trustconfig-mod --type ad --fallback-primary-group "Default SMB Group" ipa trustconfig-mod --type ad --fallback-primary-group "Default SMB Group"
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
# Trust type is a combination of ipanttrusttype and ipanttrustattributes # Trust type is a combination of ipanttrusttype and ipanttrustattributes
@@ -437,7 +441,8 @@ def add_range(myapi, trustinstance, range_name, dom_sid, *keys, **options):
# Return the values that were generated inside this function # Return the values that were generated inside this function
return range_type, range_size, base_id return range_type, range_size, base_id
def fetch_trusted_domains_over_dbus(myapi, log, forest_name):
def fetch_trusted_domains_over_dbus(myapi, forest_name):
if not _bindings_installed: if not _bindings_installed:
return return
# Calling oddjobd-activated service via DBus has some quirks: # Calling oddjobd-activated service via DBus has some quirks:
@@ -454,12 +459,13 @@ def fetch_trusted_domains_over_dbus(myapi, log, forest_name):
fetch_domains_method = intf.get_dbus_method('fetch_domains', dbus_interface=DBUS_IFACE_TRUST) fetch_domains_method = intf.get_dbus_method('fetch_domains', dbus_interface=DBUS_IFACE_TRUST)
(_ret, _stdout, _stderr) = fetch_domains_method(forest_name) (_ret, _stdout, _stderr) = fetch_domains_method(forest_name)
except dbus.DBusException as e: except dbus.DBusException as e:
log.error('Failed to call %(iface)s.fetch_domains helper.' logger.error('Failed to call %s.fetch_domains helper.'
'DBus exception is %(exc)s.' % dict(iface=DBUS_IFACE_TRUST, exc=str(e))) 'DBus exception is %s.', DBUS_IFACE_TRUST, str(e))
if _ret != 0: if _ret != 0:
log.error('Helper was called for forest %(forest)s, return code is %(ret)d' % dict(forest=forest_name, ret=_ret)) logger.error('Helper was called for forest %s, return code is %d',
log.error('Standard output from the helper:\n%s---\n' % (_stdout)) forest_name, _ret)
log.error('Error output from the helper:\n%s--\n' % (_stderr)) logger.error('Standard output from the helper:\n%s---\n', _stdout)
logger.error('Error output from the helper:\n%s--\n', _stderr)
raise errors.ServerCommandError(server=myapi.env.host, raise errors.ServerCommandError(server=myapi.env.host,
error=_('Fetching domains from trusted forest failed. ' error=_('Fetching domains from trusted forest failed. '
'See details in the error_log')) 'See details in the error_log'))
@@ -784,7 +790,7 @@ sides.
# object credentials to authenticate to AD with Kerberos, # object credentials to authenticate to AD with Kerberos,
# run DCE RPC calls to do discovery and will call # run DCE RPC calls to do discovery and will call
# add_new_domains_from_trust() on its own. # add_new_domains_from_trust() on its own.
fetch_trusted_domains_over_dbus(self.api, self.log, result['value']) fetch_trusted_domains_over_dbus(self.api, result['value'])
# Format the output into human-readable values unless `--raw` is given # Format the output into human-readable values unless `--raw` is given
self._format_trust_attrs(result, **options) self._format_trust_attrs(result, **options)
@@ -1743,7 +1749,7 @@ class trust_fetch_domains(LDAPRetrieve):
# With privilege separation we also cannot authenticate as # With privilege separation we also cannot authenticate as
# HTTP/ principal because we have no access to its key material. # HTTP/ principal because we have no access to its key material.
# Thus, we'll use DBus call out to oddjobd helper in all cases # Thus, we'll use DBus call out to oddjobd helper in all cases
fetch_trusted_domains_over_dbus(self.api, self.log, keys[0]) fetch_trusted_domains_over_dbus(self.api, keys[0])
result['summary'] = unicode(_('List of trust domains successfully ' result['summary'] = unicode(_('List of trust domains successfully '
'refreshed. Use trustdomain-find ' 'refreshed. Use trustdomain-find '
'command to list them.')) 'command to list them.'))
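
Free functions get the same treatment: `fetch_trusted_domains_over_dbus` drops its `log` parameter and reports through the module logger, so every call site loses one argument. A sketch of the resulting shape (the body is a placeholder for the real DBus call):

import logging

logger = logging.getLogger(__name__)

def fetch_trusted_domains_over_dbus(api, forest_name):
    """Sketch only: the real helper calls out to oddjobd over DBus."""
    # The module logger replaces the log object that used to be passed in.
    logger.debug('fetching trusted domains for forest %s', forest_name)

# Call sites lose the self.log argument:
#   before: fetch_trusted_domains_over_dbus(self.api, self.log, keys[0])
#   after:  fetch_trusted_domains_over_dbus(self.api, keys[0])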

View File

@@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import time import time
from time import gmtime, strftime from time import gmtime, strftime
import posixpath import posixpath
@@ -113,6 +114,8 @@ EXAMPLES:
ipa user-del tuser1 ipa user-del tuser1
""") """)
logger = logging.getLogger(__name__)
register = Registry() register = Registry()
@@ -640,7 +643,7 @@ class user_del(baseuser_del):
dn = self.obj.get_either_dn(pkey, **options) dn = self.obj.get_either_dn(pkey, **options)
delete_dn = DN(dn[0], delete_container) delete_dn = DN(dn[0], delete_container)
ldap = self.obj.backend ldap = self.obj.backend
self.log.debug("preserve move %s -> %s" % (dn, delete_dn)) logger.debug("preserve move %s -> %s", dn, delete_dn)
if dn.endswith(delete_container): if dn.endswith(delete_container):
raise errors.ExecutionError( raise errors.ExecutionError(
@@ -1108,7 +1111,7 @@ class user_status(LDAPQuery):
) )
except errors.NotFound: except errors.NotFound:
# If this happens we have some pretty serious problems # If this happens we have some pretty serious problems
self.error('No IPA masters found!') logger.error('No IPA masters found!')
entries = [] entries = []
count = 0 count = 0
@@ -1121,7 +1124,8 @@ class user_status(LDAPQuery):
other_ldap = LDAPClient(ldap_uri='ldap://%s' % host) other_ldap = LDAPClient(ldap_uri='ldap://%s' % host)
other_ldap.gssapi_bind() other_ldap.gssapi_bind()
except Exception as e: except Exception as e:
self.error("user_status: Connecting to %s failed with %s" % (host, str(e))) logger.error("user_status: Connecting to %s failed with "
"%s", host, str(e))
newresult = {'dn': dn} newresult = {'dn': dn}
newresult['server'] = _("%(host)s failed: %(error)s") % dict(host=host, error=str(e)) newresult['server'] = _("%(host)s failed: %(error)s") % dict(host=host, error=str(e))
entries.append(newresult) entries.append(newresult)
@@ -1141,7 +1145,8 @@ class user_status(LDAPQuery):
newtime = time.strptime(newresult[attr][0], '%Y%m%d%H%M%SZ') newtime = time.strptime(newresult[attr][0], '%Y%m%d%H%M%SZ')
newresult[attr][0] = unicode(time.strftime('%Y-%m-%dT%H:%M:%SZ', newtime)) newresult[attr][0] = unicode(time.strftime('%Y-%m-%dT%H:%M:%SZ', newtime))
except Exception as e: except Exception as e:
self.debug("time conversion failed with %s" % str(e)) logger.debug("time conversion failed with %s",
str(e))
newresult['server'] = host newresult['server'] = host
if options.get('raw', False): if options.get('raw', False):
time_format = '%Y%m%d%H%M%SZ' time_format = '%Y%m%d%H%M%SZ'
@@ -1157,7 +1162,8 @@ class user_status(LDAPQuery):
except errors.NotFound: except errors.NotFound:
self.api.Object.user.handle_not_found(*keys) self.api.Object.user.handle_not_found(*keys)
except Exception as e: except Exception as e:
self.error("user_status: Retrieving status for %s failed with %s" % (dn, str(e))) logger.error("user_status: Retrieving status for %s failed "
"with %s", dn, str(e))
newresult = {'dn': dn} newresult = {'dn': dn}
newresult['server'] = _("%(host)s failed") % dict(host=host) newresult['server'] = _("%(host)s failed") % dict(host=host)
entries.append(newresult) entries.append(newresult)

View File

@@ -20,11 +20,17 @@
""" """
Base classes for non-LDAP backend plugins. Base classes for non-LDAP backend plugins.
""" """
import logging
from ipalib import Command from ipalib import Command
from ipalib import errors from ipalib import errors
from ipapython.dn import DN from ipapython.dn import DN
from ipalib.text import _ from ipalib.text import _
logger = logging.getLogger(__name__)
class VirtualCommand(Command): class VirtualCommand(Command):
""" """
A command that doesn't use the LDAP backend but wants to use the A command that doesn't use the LDAP backend but wants to use the
@@ -54,7 +60,7 @@ class VirtualCommand(Command):
operation = self.operation operation = self.operation
ldap = self.api.Backend.ldap2 ldap = self.api.Backend.ldap2
self.log.debug("IPA: virtual verify %s" % operation) logger.debug("IPA: virtual verify %s", operation)
operationdn = DN(('cn', operation), self.api.env.container_virtual, self.api.env.basedn) operationdn = DN(('cn', operation), self.api.env.container_virtual, self.api.env.basedn)

View File

@@ -23,6 +23,7 @@ RPC server.
Also see the `ipalib.rpc` module. Also see the `ipalib.rpc` module.
""" """
import logging
from xml.sax.saxutils import escape from xml.sax.saxutils import escape
import os import os
import traceback import traceback
@@ -67,6 +68,8 @@ from requests.auth import AuthBase
if six.PY3: if six.PY3:
unicode = str unicode = str
logger = logging.getLogger(__name__)
HTTP_STATUS_SUCCESS = '200 Success' HTTP_STATUS_SUCCESS = '200 Success'
HTTP_STATUS_SERVER_ERROR = '500 Internal Server Error' HTTP_STATUS_SERVER_ERROR = '500 Internal Server Error'
@@ -138,7 +141,7 @@ class HTTP_Status(plugable.Plugin):
status = '404 Not Found' status = '404 Not Found'
response_headers = [('Content-Type', 'text/html; charset=utf-8')] response_headers = [('Content-Type', 'text/html; charset=utf-8')]
self.info('%s: URL="%s", %s', status, url, message) logger.info('%s: URL="%s", %s', status, url, message)
start_response(status, response_headers) start_response(status, response_headers)
output = _not_found_template % dict(url=escape(url)) output = _not_found_template % dict(url=escape(url))
return [output.encode('utf-8')] return [output.encode('utf-8')]
@@ -150,7 +153,7 @@ class HTTP_Status(plugable.Plugin):
status = '400 Bad Request' status = '400 Bad Request'
response_headers = [('Content-Type', 'text/html; charset=utf-8')] response_headers = [('Content-Type', 'text/html; charset=utf-8')]
self.info('%s: %s', status, message) logger.info('%s: %s', status, message)
start_response(status, response_headers) start_response(status, response_headers)
output = _bad_request_template % dict(message=escape(message)) output = _bad_request_template % dict(message=escape(message))
@@ -163,7 +166,7 @@ class HTTP_Status(plugable.Plugin):
status = HTTP_STATUS_SERVER_ERROR status = HTTP_STATUS_SERVER_ERROR
response_headers = [('Content-Type', 'text/html; charset=utf-8')] response_headers = [('Content-Type', 'text/html; charset=utf-8')]
self.error('%s: %s', status, message) logger.error('%s: %s', status, message)
start_response(status, response_headers) start_response(status, response_headers)
output = _internal_error_template % dict(message=escape(message)) output = _internal_error_template % dict(message=escape(message))
@@ -178,7 +181,7 @@ class HTTP_Status(plugable.Plugin):
if reason: if reason:
response_headers.append(('X-IPA-Rejection-Reason', reason)) response_headers.append(('X-IPA-Rejection-Reason', reason))
self.info('%s: %s', status, message) logger.info('%s: %s', status, message)
start_response(status, response_headers) start_response(status, response_headers)
output = _unauthorized_template % dict(message=escape(message)) output = _unauthorized_template % dict(message=escape(message))
@@ -257,7 +260,7 @@ class wsgi_dispatch(Executioner, HTTP_Status):
return key in self.__apps return key in self.__apps
def __call__(self, environ, start_response): def __call__(self, environ, start_response):
self.debug('WSGI wsgi_dispatch.__call__:') logger.debug('WSGI wsgi_dispatch.__call__:')
try: try:
return self.route(environ, start_response) return self.route(environ, start_response)
finally: finally:
@@ -288,7 +291,7 @@ class wsgi_dispatch(Executioner, HTTP_Status):
raise Exception('%s.mount(): cannot replace %r with %r at %r' % ( raise Exception('%s.mount(): cannot replace %r with %r at %r' % (
self.name, self.__apps[key], app, key) self.name, self.__apps[key], app, key)
) )
self.debug('Mounting %r at %r', app, key) logger.debug('Mounting %r at %r', app, key)
self.__apps[key] = app self.__apps[key] = app
@@ -367,10 +370,11 @@ class WSGIExecutioner(Executioner):
result = command(*args, **options) result = command(*args, **options)
except PublicError as e: except PublicError as e:
if self.api.env.debug: if self.api.env.debug:
self.debug('WSGI wsgi_execute PublicError: %s', traceback.format_exc()) logger.debug('WSGI wsgi_execute PublicError: %s',
traceback.format_exc())
error = e error = e
except Exception as e: except Exception as e:
self.exception( logger.exception(
'non-public: %s: %s', e.__class__.__name__, str(e) 'non-public: %s: %s', e.__class__.__name__, str(e)
) )
error = InternalError() error = InternalError()
@@ -382,8 +386,9 @@ class WSGIExecutioner(Executioner):
try: try:
params = command.args_options_2_params(*args, **options) params = command.args_options_2_params(*args, **options)
except Exception as e: except Exception as e:
self.info( logger.info(
'exception %s caught when converting options: %s', e.__class__.__name__, str(e) 'exception %s caught when converting options: %s',
e.__class__.__name__, str(e)
) )
# get at least some context of what is going on # get at least some context of what is going on
params = options params = options
@@ -392,18 +397,18 @@ class WSGIExecutioner(Executioner):
result_string = type(error).__name__ result_string = type(error).__name__
else: else:
result_string = 'SUCCESS' result_string = 'SUCCESS'
self.info('[%s] %s: %s(%s): %s', logger.info('[%s] %s: %s(%s): %s',
type(self).__name__, type(self).__name__,
principal, principal,
name, name,
', '.join(command._repr_iter(**params)), ', '.join(command._repr_iter(**params)),
result_string) result_string)
else: else:
self.info('[%s] %s: %s: %s', logger.info('[%s] %s: %s: %s',
type(self).__name__, type(self).__name__,
principal, principal,
name, name,
type(error).__name__) type(error).__name__)
version = options.get('version', VERSION_WITHOUT_CAPABILITIES) version = options.get('version', VERSION_WITHOUT_CAPABILITIES)
return self.marshal(result, error, _id, version) return self.marshal(result, error, _id, version)
@@ -418,7 +423,7 @@ class WSGIExecutioner(Executioner):
WSGI application for execution. WSGI application for execution.
""" """
self.debug('WSGI WSGIExecutioner.__call__:') logger.debug('WSGI WSGIExecutioner.__call__:')
try: try:
status = HTTP_STATUS_SUCCESS status = HTTP_STATUS_SUCCESS
response = self.wsgi_execute(environ) response = self.wsgi_execute(environ)
@@ -428,7 +433,7 @@ class WSGIExecutioner(Executioner):
headers = [('Content-Type', headers = [('Content-Type',
self.content_type + '; charset=utf-8')] self.content_type + '; charset=utf-8')]
except Exception: except Exception:
self.exception('WSGI %s.__call__():', self.name) logger.exception('WSGI %s.__call__():', self.name)
status = HTTP_STATUS_SERVER_ERROR status = HTTP_STATUS_SERVER_ERROR
response = status.encode('utf-8') response = status.encode('utf-8')
headers = [('Content-Type', 'text/plain; charset=utf-8')] headers = [('Content-Type', 'text/plain; charset=utf-8')]
@@ -463,7 +468,7 @@ class jsonserver(WSGIExecutioner, HTTP_Status):
''' '''
''' '''
self.debug('WSGI jsonserver.__call__:') logger.debug('WSGI jsonserver.__call__:')
response = super(jsonserver, self).__call__(environ, start_response) response = super(jsonserver, self).__call__(environ, start_response)
return response return response
@@ -598,7 +603,7 @@ class KerberosSession(HTTP_Status):
headers = [] headers = []
response = b'' response = b''
self.debug('%s need login', status) logger.debug('%s need login', status)
start_response(status, headers) start_response(status, headers)
return [response] return [response]
@@ -607,13 +612,13 @@ class KerberosSession(HTTP_Status):
# If we have a ccache ... # If we have a ccache ...
ccache_name = environ.get('KRB5CCNAME') ccache_name = environ.get('KRB5CCNAME')
if ccache_name is None: if ccache_name is None:
self.debug('no ccache, need login') logger.debug('no ccache, need login')
return return
# ... make sure we have a name ... # ... make sure we have a name ...
principal = environ.get('GSS_NAME') principal = environ.get('GSS_NAME')
if principal is None: if principal is None:
self.debug('no Principal Name, need login') logger.debug('no Principal Name, need login')
return return
# ... and use it to resolve the ccache name (Issue: 6972 ) # ... and use it to resolve the ccache name (Issue: 6972 )
@@ -623,7 +628,8 @@ class KerberosSession(HTTP_Status):
creds = get_credentials_if_valid(name=gss_name, creds = get_credentials_if_valid(name=gss_name,
ccache_name=ccache_name) ccache_name=ccache_name)
if not creds: if not creds:
self.debug('ccache expired or invalid, deleting session, need login') logger.debug(
'ccache expired or invalid, deleting session, need login')
return return
return ccache_name return ccache_name
@@ -662,7 +668,7 @@ class KerberosWSGIExecutioner(WSGIExecutioner, KerberosSession):
super(KerberosWSGIExecutioner, self)._on_finalize() super(KerberosWSGIExecutioner, self)._on_finalize()
def __call__(self, environ, start_response): def __call__(self, environ, start_response):
self.debug('KerberosWSGIExecutioner.__call__:') logger.debug('KerberosWSGIExecutioner.__call__:')
user_ccache=environ.get('KRB5CCNAME') user_ccache=environ.get('KRB5CCNAME')
object.__setattr__( object.__setattr__(
@@ -674,7 +680,7 @@ class KerberosWSGIExecutioner(WSGIExecutioner, KerberosSession):
status = HTTP_STATUS_SERVER_ERROR status = HTTP_STATUS_SERVER_ERROR
self.log.error( logger.error(
'%s: %s', status, '%s: %s', status,
'KerberosWSGIExecutioner.__call__: ' 'KerberosWSGIExecutioner.__call__: '
'KRB5CCNAME not defined in HTTP request environment') 'KRB5CCNAME not defined in HTTP request environment')
@@ -767,11 +773,13 @@ class xmlserver(KerberosWSGIExecutioner):
def marshal(self, result, error, _id=None, def marshal(self, result, error, _id=None,
version=VERSION_WITHOUT_CAPABILITIES): version=VERSION_WITHOUT_CAPABILITIES):
if error: if error:
self.debug('response: %s: %s', error.__class__.__name__, str(error)) logger.debug('response: %s: %s',
error.__class__.__name__, str(error))
response = Fault(error.errno, error.strerror) response = Fault(error.errno, error.strerror)
else: else:
if isinstance(result, dict): if isinstance(result, dict):
self.debug('response: entries returned %d', result.get('count', 1)) logger.debug('response: entries returned %d',
result.get('count', 1))
response = (result,) response = (result,)
dump = xml_dumps(response, version, methodresponse=True) dump = xml_dumps(response, version, methodresponse=True)
return dump.encode('utf-8') return dump.encode('utf-8')
@@ -794,7 +802,7 @@ class jsonserver_session(jsonserver, KerberosSession):
''' '''
''' '''
self.debug('WSGI jsonserver_session.__call__:') logger.debug('WSGI jsonserver_session.__call__:')
# Redirect to login if no Kerberos credentials # Redirect to login if no Kerberos credentials
ccache_name = self.get_environ_creds(environ) ccache_name = self.get_environ_creds(environ)
@@ -834,7 +842,7 @@ class KerberosLogin(Backend, KerberosSession):
self.api.Backend.wsgi_dispatch.mount(self, self.key) self.api.Backend.wsgi_dispatch.mount(self, self.key)
def __call__(self, environ, start_response): def __call__(self, environ, start_response):
self.debug('WSGI KerberosLogin.__call__:') logger.debug('WSGI KerberosLogin.__call__:')
# Redirect to login if no Kerberos credentials # Redirect to login if no Kerberos credentials
user_ccache_name = self.get_environ_creds(environ) user_ccache_name = self.get_environ_creds(environ)
@@ -852,7 +860,7 @@ class login_x509(KerberosLogin):
key = '/session/login_x509' key = '/session/login_x509'
def __call__(self, environ, start_response): def __call__(self, environ, start_response):
self.debug('WSGI login_x509.__call__:') logger.debug('WSGI login_x509.__call__:')
if 'KRB5CCNAME' not in environ: if 'KRB5CCNAME' not in environ:
return self.unauthorized( return self.unauthorized(
@@ -872,7 +880,7 @@ class login_password(Backend, KerberosSession):
self.api.Backend.wsgi_dispatch.mount(self, self.key) self.api.Backend.wsgi_dispatch.mount(self, self.key)
def __call__(self, environ, start_response): def __call__(self, environ, start_response):
self.debug('WSGI login_password.__call__:') logger.debug('WSGI login_password.__call__:')
# Get the user and password parameters from the request # Get the user and password parameters from the request
content_type = environ.get('CONTENT_TYPE', '').lower() content_type = environ.get('CONTENT_TYPE', '').lower()
@@ -958,7 +966,7 @@ class login_password(Backend, KerberosSession):
armor_path = os.path.join(paths.IPA_CCACHES, armor_path = os.path.join(paths.IPA_CCACHES,
"armor_{}".format(os.getpid())) "armor_{}".format(os.getpid()))
self.debug('Obtaining armor in ccache %s', armor_path) logger.debug('Obtaining armor in ccache %s', armor_path)
try: try:
kinit_armor( kinit_armor(
@@ -966,7 +974,7 @@ class login_password(Backend, KerberosSession):
pkinit_anchors=[paths.KDC_CERT, paths.KDC_CA_BUNDLE_PEM], pkinit_anchors=[paths.KDC_CERT, paths.KDC_CA_BUNDLE_PEM],
) )
except RuntimeError as e: except RuntimeError as e:
self.error("Failed to obtain armor cache") logger.error("Failed to obtain armor cache")
# We try to continue w/o armor, 2FA will be impacted # We try to continue w/o armor, 2FA will be impacted
armor_path = None armor_path = None
@@ -980,7 +988,7 @@ class login_password(Backend, KerberosSession):
lifetime=self.api.env.kinit_lifetime) lifetime=self.api.env.kinit_lifetime)
if armor_path: if armor_path:
self.debug('Cleanup the armor ccache') logger.debug('Cleanup the armor ccache')
ipautil.run([paths.KDESTROY, '-A', '-c', armor_path], ipautil.run([paths.KDESTROY, '-A', '-c', armor_path],
env={'KRB5CCNAME': armor_path}, raiseonerr=False) env={'KRB5CCNAME': armor_path}, raiseonerr=False)
except RuntimeError as e: except RuntimeError as e:
@@ -1009,7 +1017,7 @@ class change_password(Backend, HTTP_Status):
self.api.Backend.wsgi_dispatch.mount(self, self.key) self.api.Backend.wsgi_dispatch.mount(self, self.key)
def __call__(self, environ, start_response): def __call__(self, environ, start_response):
self.info('WSGI change_password.__call__:') logger.info('WSGI change_password.__call__:')
# Get the user and password parameters from the request # Get the user and password parameters from the request
content_type = environ.get('CONTENT_TYPE', '').lower() content_type = environ.get('CONTENT_TYPE', '').lower()
@@ -1040,7 +1048,8 @@ class change_password(Backend, HTTP_Status):
return self.bad_request(environ, start_response, "no %s specified" % field) return self.bad_request(environ, start_response, "no %s specified" % field)
# start building the response # start building the response
self.info("WSGI change_password: start password change of user '%s'", data['user']) logger.info("WSGI change_password: start password change of user '%s'",
data['user'])
status = HTTP_STATUS_SUCCESS status = HTTP_STATUS_SUCCESS
response_headers = [('Content-Type', 'text/html; charset=utf-8')] response_headers = [('Content-Type', 'text/html; charset=utf-8')]
title = 'Password change rejected' title = 'Password change rejected'
@@ -1061,8 +1070,9 @@ class change_password(Backend, HTTP_Status):
message = 'The old password or username is not correct.' message = 'The old password or username is not correct.'
except Exception as e: except Exception as e:
message = "Could not connect to LDAP server." message = "Could not connect to LDAP server."
self.error("change_password: cannot authenticate '%s' to LDAP server: %s", logger.error("change_password: cannot authenticate '%s' to LDAP "
data['user'], str(e)) "server: %s",
data['user'], str(e))
else: else:
try: try:
conn.modify_password(bind_dn, data['new_password'], data['old_password'], skip_bind=True) conn.modify_password(bind_dn, data['new_password'], data['old_password'], skip_bind=True)
@@ -1072,8 +1082,9 @@ class change_password(Backend, HTTP_Status):
message = "Password change was rejected: %s" % escape(str(e)) message = "Password change was rejected: %s" % escape(str(e))
except Exception as e: except Exception as e:
message = "Could not change the password" message = "Could not change the password"
self.error("change_password: cannot change password of '%s': %s", logger.error("change_password: cannot change password of "
data['user'], str(e)) "'%s': %s",
data['user'], str(e))
else: else:
result = 'ok' result = 'ok'
title = "Password change successful" title = "Password change successful"
@@ -1082,7 +1093,7 @@ class change_password(Backend, HTTP_Status):
if conn.isconnected(): if conn.isconnected():
conn.disconnect() conn.disconnect()
self.info('%s: %s', status, message) logger.info('%s: %s', status, message)
response_headers.append(('X-IPA-Pwchange-Result', result)) response_headers.append(('X-IPA-Pwchange-Result', result))
if policy_error: if policy_error:
@@ -1178,8 +1189,9 @@ class sync_token(Backend, HTTP_Status):
except Exception as e: except Exception as e:
result = 'error' result = 'error'
message = "Could not connect to LDAP server." message = "Could not connect to LDAP server."
self.error("token_sync: cannot authenticate '%s' to LDAP server: %s", logger.error("token_sync: cannot authenticate '%s' to LDAP "
data['user'], str(e)) "server: %s",
data['user'], str(e))
finally: finally:
if conn.isconnected(): if conn.isconnected():
conn.disconnect() conn.disconnect()
@@ -1209,7 +1221,7 @@ class xmlserver_session(xmlserver, KerberosSession):
headers = [] headers = []
response = b'' response = b''
self.debug('xmlserver_session: %s need login', status) logger.debug('xmlserver_session: %s need login', status)
start_response(status, headers) start_response(status, headers)
return [response] return [response]
@@ -1218,19 +1230,20 @@ class xmlserver_session(xmlserver, KerberosSession):
''' '''
''' '''
self.debug('WSGI xmlserver_session.__call__:') logger.debug('WSGI xmlserver_session.__call__:')
ccache_name = environ.get('KRB5CCNAME') ccache_name = environ.get('KRB5CCNAME')
# Redirect to /ipa/xml if no Kerberos credentials # Redirect to /ipa/xml if no Kerberos credentials
if ccache_name is None: if ccache_name is None:
self.debug('xmlserver_session.__call_: no ccache, need TGT') logger.debug('xmlserver_session.__call_: no ccache, need TGT')
return self.need_login(start_response) return self.need_login(start_response)
# Redirect to /ipa/xml if Kerberos credentials are expired # Redirect to /ipa/xml if Kerberos credentials are expired
creds = get_credentials_if_valid(ccache_name=ccache_name) creds = get_credentials_if_valid(ccache_name=ccache_name)
if not creds: if not creds:
self.debug('xmlserver_session.__call_: ccache expired, deleting session, need login') logger.debug('xmlserver_session.__call_: ccache expired, deleting '
'session, need login')
# The request is finished with the ccache, destroy it. # The request is finished with the ccache, destroy it.
return self.need_login(start_response) return self.need_login(start_response)
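
In the rpcserver hunks the object-specific `self.exception()` helper becomes `logger.exception()`, which logs at ERROR level and attaches the active traceback. A hedged sketch of the surrounding error handling; the dispatch function below is a simplified stand-in for the real WSGI execute path, not the actual implementation:

    import logging

    logger = logging.getLogger(__name__)

    def dispatch(command, *args, **options):
        # simplified stand-in for the WSGI execute path shown above
        try:
            return command(*args, **options)
        except Exception as e:
            # logger.exception() must be called from an except block so the
            # current traceback is added to the log record; it replaces the
            # removed self.exception() helper
            logger.exception('non-public: %s: %s', e.__class__.__name__, str(e))
            raise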

View File

@@ -242,7 +242,7 @@ def mh(request, class_integration_logs):
print(mh.config) print(mh.config)
for host in mh.config.get_all_hosts(): for host in mh.config.get_all_hosts():
host.add_log_collector(collect_log) host.add_log_collector(collect_log)
cls.log.info('Preparing host %s', host.hostname) log.info('Preparing host %s', host.hostname)
tasks.prepare_host(host) tasks.prepare_host(host)
setup_class(cls, mh) setup_class(cls, mh)

View File

@@ -84,6 +84,3 @@ class IntegrationTest(object):
tasks.uninstall_master(replica) tasks.uninstall_master(replica)
for client in cls.clients: for client in cls.clients:
tasks.uninstall_client(client) tasks.uninstall_client(client)
IntegrationTest.log = log_mgr.get_logger(IntegrationTest())
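
With the class-bound `IntegrationTest.log` (created through `log_mgr.get_logger`) removed, the integration test modules log through ordinary module-level loggers, which is what the bare `log.info(...)` calls in the surrounding hunks resolve to. A minimal sketch under that assumption, using a hypothetical test class:

    import logging

    log = logging.getLogger(__name__)

    class ExampleIntegrationTest(object):
        @classmethod
        def install(cls, mh):
            # was: cls.log.info(...), backed by the removed class-bound logger
            log.info('Preparing topology for %s', cls.__name__)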

View File

@@ -158,7 +158,7 @@ class TestBackupAndRestore(IntegrationTest):
with restore_checker(self.master): with restore_checker(self.master):
backup_path = backup(self.master) backup_path = backup(self.master)
self.log.info('Backup path for %s is %s', self.master, backup_path) log.info('Backup path for %s is %s', self.master, backup_path)
self.master.run_command(['ipa-server-install', self.master.run_command(['ipa-server-install',
'--uninstall', '--uninstall',
@@ -181,7 +181,7 @@ class TestBackupAndRestore(IntegrationTest):
with restore_checker(self.master): with restore_checker(self.master):
backup_path = backup(self.master) backup_path = backup(self.master)
self.log.info('Backup path for %s is %s', self.master, backup_path) log.info('Backup path for %s is %s', self.master, backup_path)
self.master.run_command(['ipa-server-install', self.master.run_command(['ipa-server-install',
'--uninstall', '--uninstall',
@@ -295,8 +295,10 @@ class BaseBackupAndRestoreWithDNSSEC(IntegrationTest):
'--dnssec', 'true', '--dnssec', 'true',
]) ])
assert wait_until_record_is_signed(self.master.ip, assert (
self.example_test_zone, self.log), "Zone is not signed" wait_until_record_is_signed(
self.master.ip, self.example_test_zone)
), "Zone is not signed"
backup_path = backup(self.master) backup_path = backup(self.master)
@@ -311,9 +313,10 @@ class BaseBackupAndRestoreWithDNSSEC(IntegrationTest):
self.master.run_command(['ipa-restore', backup_path], self.master.run_command(['ipa-restore', backup_path],
stdin_text=dirman_password + '\nyes') stdin_text=dirman_password + '\nyes')
assert wait_until_record_is_signed(self.master.ip, assert (
self.example_test_zone, self.log), ("Zone is not signed after " wait_until_record_is_signed(
"restore") self.master.ip, self.example_test_zone)
), "Zone is not signed after restore"
tasks.kinit_admin(self.master) tasks.kinit_admin(self.master)
self.master.run_command([ self.master.run_command([
@@ -322,8 +325,10 @@ class BaseBackupAndRestoreWithDNSSEC(IntegrationTest):
'--dnssec', 'true', '--dnssec', 'true',
]) ])
assert wait_until_record_is_signed(self.master.ip, assert (
self.example2_test_zone, self.log), "A new zone is not signed" wait_until_record_is_signed(
self.master.ip, self.example2_test_zone)
), "A new zone is not signed"
class TestBackupAndRestoreWithDNSSEC(BaseBackupAndRestoreWithDNSSEC): class TestBackupAndRestoreWithDNSSEC(BaseBackupAndRestoreWithDNSSEC):
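
Dropping the `self.log` argument also let the long assertions above be rewrapped: the call is parenthesized while the assertion message stays outside the parentheses (a bare `assert (expr, msg)` tuple would always be true). An illustrative, self-contained rewrap; the helper and the values below are placeholders, not names from the commit:

    def zone_is_signed(nameserver, zone):
        # stand-in for the real wait_until_record_is_signed() helper
        return True

    master_ip, example_zone = "192.0.2.1", "example.test."
    assert (
        zone_is_signed(master_ip, example_zone)
    ), "Zone %s is not signed" % example_zone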

View File

@@ -17,6 +17,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os import os
import tempfile import tempfile
import shutil import shutil
@@ -34,6 +35,8 @@ from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_plugins.integration import tasks from ipatests.pytest_plugins.integration import tasks
from ipalib.constants import DOMAIN_LEVEL_0 from ipalib.constants import DOMAIN_LEVEL_0
logger = logging.getLogger(__name__)
_DEFAULT = object() _DEFAULT = object()
assert_error = tasks.assert_error assert_error = tasks.assert_error
@@ -331,20 +334,20 @@ class CALessBase(IntegrationTest):
""" """
with open(self.pem_filename) as f: with open(self.pem_filename) as f:
expected_cacrt = f.read() expected_cacrt = f.read()
self.log.debug('Expected /etc/ipa/ca.crt contents:\n%s', logger.debug('Expected /etc/ipa/ca.crt contents:\n%s',
expected_cacrt) expected_cacrt)
expected_binary_cacrt = base64.b64decode(x509.strip_header( expected_binary_cacrt = base64.b64decode(x509.strip_header(
expected_cacrt)) expected_cacrt))
self.log.debug('Expected binary CA cert:\n%r', logger.debug('Expected binary CA cert:\n%r',
expected_binary_cacrt) expected_binary_cacrt)
for host in [self.master] + self.replicas: for host in [self.master] + self.replicas:
# Check the LDAP entry # Check the LDAP entry
ldap = host.ldap_connect() ldap = host.ldap_connect()
entry = ldap.get_entry(DN(('cn', 'CACert'), ('cn', 'ipa'), entry = ldap.get_entry(DN(('cn', 'CACert'), ('cn', 'ipa'),
('cn', 'etc'), host.domain.basedn)) ('cn', 'etc'), host.domain.basedn))
cert_from_ldap = entry.single_value['cACertificate'] cert_from_ldap = entry.single_value['cACertificate']
self.log.debug('CA cert from LDAP on %s:\n%r', logger.debug('CA cert from LDAP on %s:\n%r',
host, cert_from_ldap) host, cert_from_ldap)
assert cert_from_ldap == expected_binary_cacrt assert cert_from_ldap == expected_binary_cacrt
# Verify certmonger was not started # Verify certmonger was not started
@@ -354,11 +357,11 @@ class CALessBase(IntegrationTest):
for host in self.get_all_hosts(): for host in self.get_all_hosts():
# Check the cert PEM file # Check the cert PEM file
remote_cacrt = host.get_file_contents(paths.IPA_CA_CRT) remote_cacrt = host.get_file_contents(paths.IPA_CA_CRT)
self.log.debug('%s:/etc/ipa/ca.crt contents:\n%s', logger.debug('%s:/etc/ipa/ca.crt contents:\n%s',
host, remote_cacrt) host, remote_cacrt)
binary_cacrt = base64.b64decode(x509.strip_header(remote_cacrt)) binary_cacrt = base64.b64decode(x509.strip_header(remote_cacrt))
self.log.debug('%s: Decoded /etc/ipa/ca.crt:\n%r', logger.debug('%s: Decoded /etc/ipa/ca.crt:\n%r',
host, binary_cacrt) host, binary_cacrt)
assert expected_binary_cacrt == binary_cacrt assert expected_binary_cacrt == binary_cacrt
@@ -1182,7 +1185,7 @@ class TestIPACommands(CALessBase):
tasks.kinit_admin(cls.master) tasks.kinit_admin(cls.master)
cls.client_pem = ''.join(cls.get_pem('ca1/client').splitlines()[1:-1]) cls.client_pem = ''.join(cls.get_pem('ca1/client').splitlines()[1:-1])
cls.log.debug('Client PEM:\n%r' % cls.client_pem) logger.debug('Client PEM:\n%r', cls.client_pem)
cls.test_hostname = 'testhost.%s' % cls.master.domain.name cls.test_hostname = 'testhost.%s' % cls.master.domain.name
cls.test_service = 'test/%s' % cls.test_hostname cls.test_service = 'test/%s' % cls.test_hostname
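
The caless hunks keep `%r` for certificate blobs but pass the value as a lazy argument, so `repr()` of the potentially large binary data is only computed when a DEBUG handler is active. A small sketch of that pattern (the function name is illustrative):

    import logging

    logger = logging.getLogger(__name__)

    def log_binary_cert(binary_cacrt):
        # repr() of the bytes is evaluated by the logging framework,
        # and only if DEBUG records are actually emitted
        logger.debug('Expected binary CA cert:\n%r', binary_cacrt)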

View File

@@ -1,6 +1,7 @@
# #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license # Copyright (C) 2016 FreeIPA Contributors see COPYING for license
# #
import logging
import time import time
import dns.resolver import dns.resolver
import dns.rrset import dns.rrset
@@ -11,6 +12,8 @@ from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_plugins.integration import tasks from ipatests.pytest_plugins.integration import tasks
from ipapython.dnsutil import DNSName from ipapython.dnsutil import DNSName
logger = logging.getLogger(__name__)
IPA_DEFAULT_MASTER_SRV_REC = ( IPA_DEFAULT_MASTER_SRV_REC = (
# srv record name, port # srv record name, port
(DNSName(u'_ldap._tcp'), 389), (DNSName(u'_ldap._tcp'), 389),
@@ -23,7 +26,7 @@ IPA_DEFAULT_MASTER_SRV_REC = (
) )
def resolve_records_from_server(rname, rtype, nameserver, logger): def resolve_records_from_server(rname, rtype, nameserver):
res = dns.resolver.Resolver() res = dns.resolver.Resolver()
res.nameservers = [nameserver] res.nameservers = [nameserver]
res.lifetime = 10 res.lifetime = 10
@@ -86,7 +89,7 @@ class TestDNSLocations(IntegrationTest):
expected = _gen_expected_srv_rrset( expected = _gen_expected_srv_rrset(
name_abs, port, expected_servers) name_abs, port, expected_servers)
query = resolve_records_from_server( query = resolve_records_from_server(
name_abs, 'SRV', server_ip, self.log) name_abs, 'SRV', server_ip)
assert expected == query, ( assert expected == query, (
"Expected and received DNS data do not match on server " "Expected and received DNS data do not match on server "
"with IP: '{}' for name '{}' (expected:\n{}\ngot:\n{})".format( "with IP: '{}' for name '{}' (expected:\n{}\ngot:\n{})".format(

View File

@@ -2,6 +2,8 @@
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license # Copyright (C) 2015 FreeIPA Contributors see COPYING for license
# #
import logging
import dns.dnssec import dns.dnssec
import dns.resolver import dns.resolver
import dns.name import dns.name
@@ -11,6 +13,8 @@ from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_plugins.integration import tasks from ipatests.pytest_plugins.integration import tasks
from ipaplatform.paths import paths from ipaplatform.paths import paths
logger = logging.getLogger(__name__)
test_zone = "dnssec.test." test_zone = "dnssec.test."
test_zone_repl = "dnssec-replica.test." test_zone_repl = "dnssec-replica.test."
root_zone = "." root_zone = "."
@@ -19,7 +23,7 @@ example2_test_zone = "example2.test."
example3_test_zone = "example3.test." example3_test_zone = "example3.test."
def resolve_with_dnssec(nameserver, query, log, rtype="SOA"): def resolve_with_dnssec(nameserver, query, rtype="SOA"):
res = dns.resolver.Resolver() res = dns.resolver.Resolver()
res.nameservers = [nameserver] res.nameservers = [nameserver]
res.lifetime = 10 # wait max 10 seconds for reply res.lifetime = 10 # wait max 10 seconds for reply
@@ -32,17 +36,18 @@ def resolve_with_dnssec(nameserver, query, log, rtype="SOA"):
ans = res.query(query, rtype) ans = res.query(query, rtype)
return ans return ans
def get_RRSIG_record(nameserver, query, log, rtype="SOA"):
ans = resolve_with_dnssec(nameserver, query, log, rtype=rtype) def get_RRSIG_record(nameserver, query, rtype="SOA"):
ans = resolve_with_dnssec(nameserver, query, rtype=rtype)
return ans.response.find_rrset( return ans.response.find_rrset(
ans.response.answer, dns.name.from_text(query), ans.response.answer, dns.name.from_text(query),
dns.rdataclass.IN, dns.rdatatype.RRSIG, dns.rdataclass.IN, dns.rdatatype.RRSIG,
dns.rdatatype.from_text(rtype)) dns.rdatatype.from_text(rtype))
def is_record_signed(nameserver, query, log, rtype="SOA"): def is_record_signed(nameserver, query, rtype="SOA"):
try: try:
get_RRSIG_record(nameserver, query, log, rtype=rtype) get_RRSIG_record(nameserver, query, rtype=rtype)
except KeyError: except KeyError:
return False return False
except dns.exception.DNSException: except dns.exception.DNSException:
@@ -50,7 +55,7 @@ def is_record_signed(nameserver, query, log, rtype="SOA"):
return True return True
def wait_until_record_is_signed(nameserver, record, log, rtype="SOA", def wait_until_record_is_signed(nameserver, record, rtype="SOA",
timeout=100): timeout=100):
""" """
Returns True if record is signed, or False on timeout Returns True if record is signed, or False on timeout
@@ -61,11 +66,11 @@ def wait_until_record_is_signed(nameserver, record, log, rtype="SOA",
:param timeout: :param timeout:
:return: True if record is signed, False if timeout :return: True if record is signed, False if timeout
""" """
log.info("Waiting for signed %s record of %s from server %s (timeout %s " logger.info("Waiting for signed %s record of %s from server %s (timeout "
"sec)", rtype, record, nameserver, timeout) "%s sec)", rtype, record, nameserver, timeout)
wait_until = time.time() + timeout wait_until = time.time() + timeout
while time.time() < wait_until: while time.time() < wait_until:
if is_record_signed(nameserver, record, log, rtype=rtype): if is_record_signed(nameserver, record, rtype=rtype):
return True return True
time.sleep(1) time.sleep(1)
return False return False
@@ -107,12 +112,12 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master # test master
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, test_zone, self.log, timeout=100 self.master.ip, test_zone, timeout=100
), "Zone %s is not signed (master)" % test_zone ), "Zone %s is not signed (master)" % test_zone
# test replica # test replica
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone, self.log, timeout=200 self.replicas[0].ip, test_zone, timeout=200
), "DNS zone %s is not signed (replica)" % test_zone ), "DNS zone %s is not signed (replica)" % test_zone
def test_if_zone_is_signed_replica(self): def test_if_zone_is_signed_replica(self):
@@ -127,20 +132,20 @@ class TestInstallDNSSECLast(IntegrationTest):
# test replica # test replica
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone_repl, self.log, timeout=300 self.replicas[0].ip, test_zone_repl, timeout=300
), "Zone %s is not signed (replica)" % test_zone_repl ), "Zone %s is not signed (replica)" % test_zone_repl
# we do not need to wait, on master zones should be signed faster # we do not need to wait, on master zones should be signed faster
# than on replicas # than on replicas
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, test_zone_repl, self.log, timeout=5 self.master.ip, test_zone_repl, timeout=5
), "DNS zone %s is not signed (master)" % test_zone ), "DNS zone %s is not signed (master)" % test_zone
def test_disable_reenable_signing_master(self): def test_disable_reenable_signing_master(self):
dnskey_old = resolve_with_dnssec(self.master.ip, test_zone, dnskey_old = resolve_with_dnssec(self.master.ip, test_zone,
self.log, rtype="DNSKEY").rrset rtype="DNSKEY").rrset
# disable DNSSEC signing of zone on master # disable DNSSEC signing of zone on master
args = [ args = [
@@ -154,12 +159,12 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master # test master
assert not is_record_signed( assert not is_record_signed(
self.master.ip, test_zone, self.log self.master.ip, test_zone
), "Zone %s is still signed (master)" % test_zone ), "Zone %s is still signed (master)" % test_zone
# test replica # test replica
assert not is_record_signed( assert not is_record_signed(
self.replicas[0].ip, test_zone, self.log self.replicas[0].ip, test_zone
), "DNS zone %s is still signed (replica)" % test_zone ), "DNS zone %s is still signed (replica)" % test_zone
# reenable DNSSEC signing # reenable DNSSEC signing
@@ -172,22 +177,22 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master # test master
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, test_zone, self.log, timeout=100 self.master.ip, test_zone, timeout=100
), "Zone %s is not signed (master)" % test_zone ), "Zone %s is not signed (master)" % test_zone
# test replica # test replica
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone, self.log, timeout=200 self.replicas[0].ip, test_zone, timeout=200
), "DNS zone %s is not signed (replica)" % test_zone ), "DNS zone %s is not signed (replica)" % test_zone
dnskey_new = resolve_with_dnssec(self.master.ip, test_zone, dnskey_new = resolve_with_dnssec(self.master.ip, test_zone,
self.log, rtype="DNSKEY").rrset rtype="DNSKEY").rrset
assert dnskey_old != dnskey_new, "DNSKEY should be different" assert dnskey_old != dnskey_new, "DNSKEY should be different"
def test_disable_reenable_signing_replica(self): def test_disable_reenable_signing_replica(self):
dnskey_old = resolve_with_dnssec(self.replicas[0].ip, test_zone_repl, dnskey_old = resolve_with_dnssec(self.replicas[0].ip, test_zone_repl,
self.log, rtype="DNSKEY").rrset rtype="DNSKEY").rrset
# disable DNSSEC signing of zone on replica # disable DNSSEC signing of zone on replica
args = [ args = [
@@ -201,12 +206,12 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master # test master
assert not is_record_signed( assert not is_record_signed(
self.master.ip, test_zone_repl, self.log self.master.ip, test_zone_repl
), "Zone %s is still signed (master)" % test_zone_repl ), "Zone %s is still signed (master)" % test_zone_repl
# test replica # test replica
assert not is_record_signed( assert not is_record_signed(
self.replicas[0].ip, test_zone_repl, self.log self.replicas[0].ip, test_zone_repl
), "DNS zone %s is still signed (replica)" % test_zone_repl ), "DNS zone %s is still signed (replica)" % test_zone_repl
# reenable DNSSEC signing # reenable DNSSEC signing
@@ -219,16 +224,16 @@ class TestInstallDNSSECLast(IntegrationTest):
# test master # test master
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, test_zone_repl, self.log, timeout=100 self.master.ip, test_zone_repl, timeout=100
), "Zone %s is not signed (master)" % test_zone_repl ), "Zone %s is not signed (master)" % test_zone_repl
# test replica # test replica
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, test_zone_repl, self.log, timeout=200 self.replicas[0].ip, test_zone_repl, timeout=200
), "DNS zone %s is not signed (replica)" % test_zone_repl ), "DNS zone %s is not signed (replica)" % test_zone_repl
dnskey_new = resolve_with_dnssec(self.replicas[0].ip, test_zone_repl, dnskey_new = resolve_with_dnssec(self.replicas[0].ip, test_zone_repl,
self.log, rtype="DNSKEY").rrset rtype="DNSKEY").rrset
assert dnskey_old != dnskey_new, "DNSKEY should be different" assert dnskey_old != dnskey_new, "DNSKEY should be different"
@@ -292,12 +297,12 @@ class TestInstallDNSSECFirst(IntegrationTest):
self.master.run_command(args) self.master.run_command(args)
# test master # test master
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, root_zone, self.log, timeout=100 self.master.ip, root_zone, timeout=100
), "Zone %s is not signed (master)" % root_zone ), "Zone %s is not signed (master)" % root_zone
# test replica # test replica
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, root_zone, self.log, timeout=300 self.replicas[0].ip, root_zone, timeout=300
), "Zone %s is not signed (replica)" % root_zone ), "Zone %s is not signed (replica)" % root_zone
def test_chain_of_trust(self): def test_chain_of_trust(self):
@@ -322,15 +327,15 @@ class TestInstallDNSSECFirst(IntegrationTest):
self.master.run_command(args) self.master.run_command(args)
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, example_test_zone, self.log, timeout=100 self.master.ip, example_test_zone, timeout=100
), "Zone %s is not signed (master)" % example_test_zone ), "Zone %s is not signed (master)" % example_test_zone
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, example_test_zone, self.log, timeout=200 self.replicas[0].ip, example_test_zone, timeout=200
), "Zone %s is not signed (replica)" % example_test_zone ), "Zone %s is not signed (replica)" % example_test_zone
# GET DNSKEY records from zone # GET DNSKEY records from zone
ans = resolve_with_dnssec(self.master.ip, example_test_zone, self.log, ans = resolve_with_dnssec(self.master.ip, example_test_zone,
rtype="DNSKEY") rtype="DNSKEY")
dnskey_rrset = ans.response.get_rrset( dnskey_rrset = ans.response.get_rrset(
ans.response.answer, ans.response.answer,
@@ -339,7 +344,7 @@ class TestInstallDNSSECFirst(IntegrationTest):
dns.rdatatype.DNSKEY) dns.rdatatype.DNSKEY)
assert dnskey_rrset, "No DNSKEY records received" assert dnskey_rrset, "No DNSKEY records received"
self.log.debug("DNSKEY records returned: %s", dnskey_rrset.to_text()) logger.debug("DNSKEY records returned: %s", dnskey_rrset.to_text())
# generate DS records # generate DS records
ds_records = [] ds_records = []
@@ -351,8 +356,8 @@ class TestInstallDNSSECFirst(IntegrationTest):
assert ds_records, ("No KSK returned from the %s zone" % assert ds_records, ("No KSK returned from the %s zone" %
example_test_zone) example_test_zone)
self.log.debug("DS records for %s created: %r", example_test_zone, logger.debug("DS records for %s created: %r", example_test_zone,
ds_records) ds_records)
# add DS records to root zone # add DS records to root zone
args = [ args = [
@@ -368,12 +373,12 @@ class TestInstallDNSSECFirst(IntegrationTest):
# wait until DS record is replicated # wait until DS record is replicated
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, example_test_zone, self.log, timeout=100, self.replicas[0].ip, example_test_zone, timeout=100,
rtype="DS" rtype="DS"
), "No DS record of '%s' returned from replica" % example_test_zone ), "No DS record of '%s' returned from replica" % example_test_zone
# extract DSKEY from root zone # extract DSKEY from root zone
ans = resolve_with_dnssec(self.master.ip, root_zone, self.log, ans = resolve_with_dnssec(self.master.ip, root_zone,
rtype="DNSKEY") rtype="DNSKEY")
dnskey_rrset = ans.response.get_rrset(ans.response.answer, dnskey_rrset = ans.response.get_rrset(ans.response.answer,
dns.name.from_text(root_zone), dns.name.from_text(root_zone),
@@ -381,7 +386,7 @@ class TestInstallDNSSECFirst(IntegrationTest):
dns.rdatatype.DNSKEY) dns.rdatatype.DNSKEY)
assert dnskey_rrset, "No DNSKEY records received" assert dnskey_rrset, "No DNSKEY records received"
self.log.debug("DNSKEY records returned: %s", dnskey_rrset.to_text()) logger.debug("DNSKEY records returned: %s", dnskey_rrset.to_text())
# export trust keys for root zone # export trust keys for root zone
root_key_rdatas = [] root_key_rdatas = []
@@ -395,7 +400,7 @@ class TestInstallDNSSECFirst(IntegrationTest):
root_keys_rrset = dns.rrset.from_rdata_list(dnskey_rrset.name, root_keys_rrset = dns.rrset.from_rdata_list(dnskey_rrset.name,
dnskey_rrset.ttl, dnskey_rrset.ttl,
root_key_rdatas) root_key_rdatas)
self.log.debug("Root zone trusted key: %s", root_keys_rrset.to_text()) logger.debug("Root zone trusted key: %s", root_keys_rrset.to_text())
# set trusted key for our root zone # set trusted key for our root zone
self.master.put_file_contents(paths.DNSSEC_TRUSTED_KEY, self.master.put_file_contents(paths.DNSSEC_TRUSTED_KEY,
@@ -459,15 +464,15 @@ class TestMigrateDNSSECMaster(IntegrationTest):
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, example_test_zone, self.log, timeout=100 self.master.ip, example_test_zone, timeout=100
), "Zone %s is not signed (master)" % example_test_zone ), "Zone %s is not signed (master)" % example_test_zone
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, example_test_zone, self.log, timeout=200 self.replicas[0].ip, example_test_zone, timeout=200
), "Zone %s is not signed (replica)" % example_test_zone ), "Zone %s is not signed (replica)" % example_test_zone
dnskey_old = resolve_with_dnssec(self.master.ip, example_test_zone, dnskey_old = resolve_with_dnssec(self.master.ip, example_test_zone,
self.log, rtype="DNSKEY").rrset rtype="DNSKEY").rrset
# migrate dnssec master to replica # migrate dnssec master to replica
args = [ args = [
@@ -495,16 +500,16 @@ class TestMigrateDNSSECMaster(IntegrationTest):
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, example_test_zone, self.log, timeout=100 self.master.ip, example_test_zone, timeout=100
), "Zone %s is not signed after migration (master)" % example_test_zone ), "Zone %s is not signed after migration (master)" % example_test_zone
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, example_test_zone, self.log, timeout=200 self.replicas[0].ip, example_test_zone, timeout=200
), "Zone %s is not signed after migration (replica)" % example_test_zone ), "Zone %s is not signed after migration (replica)" % example_test_zone
# test if dnskey are the same # test if dnskey are the same
dnskey_new = resolve_with_dnssec(self.master.ip, example_test_zone, dnskey_new = resolve_with_dnssec(self.master.ip, example_test_zone,
self.log, rtype="DNSKEY").rrset rtype="DNSKEY").rrset
assert dnskey_old == dnskey_new, "DNSKEY should be the same" assert dnskey_old == dnskey_new, "DNSKEY should be the same"
# add test zone # add test zone
@@ -515,12 +520,12 @@ class TestMigrateDNSSECMaster(IntegrationTest):
self.replicas[0].run_command(args) self.replicas[0].run_command(args)
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, example2_test_zone, self.log, timeout=100 self.replicas[0].ip, example2_test_zone, timeout=100
), ("Zone %s is not signed after migration (replica - dnssec master)" ), ("Zone %s is not signed after migration (replica - dnssec master)"
% example2_test_zone) % example2_test_zone)
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, example2_test_zone, self.log, timeout=200 self.master.ip, example2_test_zone, timeout=200
), ("Zone %s is not signed after migration (master)" ), ("Zone %s is not signed after migration (master)"
% example2_test_zone) % example2_test_zone)
@@ -530,12 +535,12 @@ class TestMigrateDNSSECMaster(IntegrationTest):
# test if original zones are signed on new replica # test if original zones are signed on new replica
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[1].ip, example_test_zone, self.log, timeout=200 self.replicas[1].ip, example_test_zone, timeout=200
), ("Zone %s is not signed (new replica)" ), ("Zone %s is not signed (new replica)"
% example_test_zone) % example_test_zone)
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[1].ip, example2_test_zone, self.log, timeout=200 self.replicas[1].ip, example2_test_zone, timeout=200
), ("Zone %s is not signed (new replica)" ), ("Zone %s is not signed (new replica)"
% example2_test_zone) % example2_test_zone)
@@ -547,15 +552,15 @@ class TestMigrateDNSSECMaster(IntegrationTest):
self.replicas[1].run_command(args) self.replicas[1].run_command(args)
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[1].ip, example3_test_zone, self.log, timeout=200 self.replicas[1].ip, example3_test_zone, timeout=200
), ("Zone %s is not signed (new replica)" ), ("Zone %s is not signed (new replica)"
% example3_test_zone) % example3_test_zone)
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.replicas[0].ip, example3_test_zone, self.log, timeout=200 self.replicas[0].ip, example3_test_zone, timeout=200
), ("Zone %s is not signed (replica)" ), ("Zone %s is not signed (replica)"
% example3_test_zone) % example3_test_zone)
# wait until zone is signed # wait until zone is signed
assert wait_until_record_is_signed( assert wait_until_record_is_signed(
self.master.ip, example3_test_zone, self.log, timeout=200 self.master.ip, example3_test_zone, timeout=200
), ("Zone %s is not signed (master)" ), ("Zone %s is not signed (master)"
% example3_test_zone) % example3_test_zone)
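
The DNSSEC helpers show the second half of the cleanup: `resolve_with_dnssec`, `is_record_signed` and `wait_until_record_is_signed` no longer take a `log` object, they report through the module-level logger, and every caller simply drops the `self.log` argument. A condensed sketch of the resulting shape, with the record check stubbed out:

    import logging
    import time

    logger = logging.getLogger(__name__)

    def is_record_signed(nameserver, record, rtype="SOA"):
        # stand-in body; the real helper looks for an RRSIG in the DNS answer
        return False

    def wait_until_record_is_signed(nameserver, record, rtype="SOA",
                                    timeout=100):
        """Return True once the record is signed, False on timeout."""
        logger.info("Waiting for signed %s record of %s from server %s "
                    "(timeout %s sec)", rtype, record, nameserver, timeout)
        deadline = time.time() + timeout
        while time.time() < deadline:
            if is_record_signed(nameserver, record, rtype=rtype):
                return True
            time.sleep(1)
        return False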

View File

@@ -16,6 +16,8 @@
# #
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os import os
import subprocess import subprocess
from ipaplatform.paths import paths from ipaplatform.paths import paths
@@ -24,6 +26,8 @@ import pytest
from ipatests.test_integration.base import IntegrationTest from ipatests.test_integration.base import IntegrationTest
from ipatests.pytest_plugins.integration import tasks from ipatests.pytest_plugins.integration import tasks
logger = logging.getLogger(__name__)
CLIENT_KEYTAB = paths.KRB5_KEYTAB CLIENT_KEYTAB = paths.KRB5_KEYTAB
@@ -253,7 +257,8 @@ class TestForcedClientReenrollment(IntegrationTest):
assert sshfp_record, 'SSHFP record not found' assert sshfp_record, 'SSHFP record not found'
sshfp_record = set(sshfp_record.split(', ')) sshfp_record = set(sshfp_record.split(', '))
self.log.debug("SSHFP record for host %s: %s", client_host, str(sshfp_record)) logger.debug("SSHFP record for host %s: %s",
client_host, str(sshfp_record))
return sshfp_record return sshfp_record

View File

@@ -206,18 +206,17 @@ ipa_class_members = {
'Method', 'Method',
'Updater', 'Updater',
'Advice', 'Advice',
] + LOGGING_ATTRS, ],
'ipalib.util.ForwarderValidationError': [ 'ipalib.util.ForwarderValidationError': [
'msg', 'msg',
], ],
'ipaserver.install.ldapupdate.LDAPUpdate': LOGGING_ATTRS,
'ipaserver.plugins.dns.DNSRecord': [ 'ipaserver.plugins.dns.DNSRecord': [
'validatedns', 'validatedns',
'normalizedns', 'normalizedns',
], ],
'ipaserver.rpcserver.KerberosSession': [ 'ipaserver.rpcserver.KerberosSession': [
fake_api, fake_api,
] + LOGGING_ATTRS, ],
'ipatests.test_integration.base.IntegrationTest': [ 'ipatests.test_integration.base.IntegrationTest': [
'domain', 'domain',
{'master': [ {'master': [