2013-11-27 14:53:57 +01:00
|
|
|
#! /usr/bin/python2 -E
|
2007-08-16 18:00:16 -04:00
|
|
|
# Authors: Simo Sorce <ssorce@redhat.com>
|
|
|
|
|
# Karl MacMillan <kmacmillan@mentalrootkit.com>
|
|
|
|
|
#
|
|
|
|
|
# Copyright (C) 2007 Red Hat
|
|
|
|
|
# see file 'COPYING' for use and warranty information
|
|
|
|
|
#
|
2010-12-09 13:59:11 +01:00
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
|
# (at your option) any later version.
|
2007-08-16 18:00:16 -04:00
|
|
|
#
|
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
|
#
|
|
|
|
|
# You should have received a copy of the GNU General Public License
|
2010-12-09 13:59:11 +01:00
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2007-08-16 18:00:16 -04:00
|
|
|
#
|
|
|
|
|
|
2008-05-30 14:21:45 -04:00
|
|
|
try:
|
|
|
|
|
import sys
|
|
|
|
|
|
|
|
|
|
import os
|
2010-09-17 17:20:23 -04:00
|
|
|
import time
|
2008-05-30 14:21:45 -04:00
|
|
|
import socket
|
2012-11-15 14:57:52 -05:00
|
|
|
import urlparse
|
2009-11-19 14:14:42 -05:00
|
|
|
import tempfile
|
|
|
|
|
import getpass
|
2013-01-31 07:46:33 -05:00
|
|
|
from ConfigParser import RawConfigParser
|
|
|
|
|
from optparse import SUPPRESS_HELP, OptionGroup, OptionValueError
|
2014-06-12 13:40:56 +02:00
|
|
|
import shutil
|
2015-08-18 19:45:23 +02:00
|
|
|
import dns
|
2015-07-20 16:04:07 +02:00
|
|
|
import gssapi
|
2013-01-31 07:46:33 -05:00
|
|
|
|
|
|
|
|
import nss.nss as nss
|
|
|
|
|
import SSSDConfig
|
|
|
|
|
|
|
|
|
|
from ipapython.ipa_log_manager import standard_logging_setup, root_logger
|
2011-07-06 10:30:24 -04:00
|
|
|
from ipaclient import ipadiscovery
|
2008-05-30 14:21:45 -04:00
|
|
|
import ipaclient.ipachangeconf
|
|
|
|
|
import ipaclient.ntpconf
|
2013-01-31 07:46:33 -05:00
|
|
|
from ipapython.ipautil import (
|
2013-11-04 11:52:02 +01:00
|
|
|
run, user_input, CalledProcessError, file_exists, dir_exists,
|
|
|
|
|
realm_to_suffix)
|
2014-05-29 10:18:21 +02:00
|
|
|
from ipaplatform.tasks import tasks
|
2014-05-29 10:44:57 +02:00
|
|
|
from ipaplatform import services
|
|
|
|
|
from ipaplatform.paths import paths
|
2013-01-31 07:46:33 -05:00
|
|
|
from ipapython import ipautil, sysrestore, version, certmonger, ipaldap
|
2014-09-18 16:28:59 +02:00
|
|
|
from ipapython import kernel_keyring, certdb
|
2010-10-29 20:24:31 +02:00
|
|
|
from ipapython.config import IPAOptionParser
|
2011-12-07 03:15:45 -05:00
|
|
|
from ipalib import api, errors
|
2014-06-12 12:04:59 +02:00
|
|
|
from ipalib import x509, certstore
|
2013-09-11 08:27:34 +00:00
|
|
|
from ipalib.constants import CACERT
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equalty comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 07:36:35 -04:00
|
|
|
from ipapython.dn import DN
|
2012-09-03 09:33:30 -04:00
|
|
|
from ipapython.ssh import SSHPublicKey
|
2012-12-04 18:20:17 -05:00
|
|
|
from ipalib.rpc import delete_persistent_client_session_data
|
2014-05-27 09:13:59 +02:00
|
|
|
|
2015-08-10 18:00:36 +02:00
|
|
|
except ImportError as e:
|
2008-05-30 14:21:45 -04:00
|
|
|
print >> sys.stderr, """\
|
|
|
|
|
There was a problem importing one of the required Python modules. The
|
|
|
|
|
error was:
|
|
|
|
|
|
|
|
|
|
%s
|
2015-08-10 18:00:36 +02:00
|
|
|
""" % e
|
2008-05-30 14:21:45 -04:00
|
|
|
sys.exit(1)
|
|
|
|
|
|
2012-11-15 14:57:52 -05:00
|
|
|
# Exit status codes returned by this installer.
SUCCESS = 0
CLIENT_INSTALL_ERROR = 1
CLIENT_NOT_CONFIGURED = 2
CLIENT_ALREADY_CONFIGURED = 3
CLIENT_UNINSTALL_ERROR = 4  # error after restoring files/state
|
|
|
|
|
|
2007-08-16 18:00:16 -04:00
|
|
|
def parse_options():
    """Parse command line options for ipa-client-install.

    Builds the option parser (basic, SSSD and uninstall option groups),
    parses sys.argv, validates mutually exclusive option combinations and
    returns a tuple (safe_opts, options), where safe_opts is a loggable
    representation with sensitive values masked.
    """
    def validate_ca_cert_file_option(option, opt, value, parser):
        # optparse callback: the value must be an existing, absolute path
        # to a regular file that parses as a certificate.
        if not os.path.exists(value):
            raise OptionValueError("%s option '%s' does not exist" % (opt, value))
        if not os.path.isfile(value):
            raise OptionValueError("%s option '%s' is not a file" % (opt, value))
        if not os.path.isabs(value):
            raise OptionValueError("%s option '%s' is not an absolute file path" % (opt, value))

        # Remember whether NSS was already initialized so we only shut it
        # down if the load_certificate call below initialized it itself.
        initialized = nss.nss_is_initialized()
        try:
            cert = x509.load_certificate_from_file(value)
        except Exception:
            raise OptionValueError("%s option '%s' is not a valid certificate file" % (opt, value))
        else:
            del cert
            if not initialized:
                nss.nss_shutdown()

        parser.values.ca_cert_file = value

    def kinit_attempts_callback(option, opt, value, parser):
        # optparse callback: reject non-positive attempt counts.
        if value < 1:
            raise OptionValueError(
                "Option %s expects an integer greater than 0."
                % opt)

        parser.values.kinit_attempts = value

    parser = IPAOptionParser(version=version.VERSION)

    basic_group = OptionGroup(parser, "basic options")
    basic_group.add_option("--domain", dest="domain", help="domain name")
    basic_group.add_option("--server", dest="server", help="IPA server", action="append")
    basic_group.add_option("--realm", dest="realm_name", help="realm name")
    basic_group.add_option("--fixed-primary", dest="primary", action="store_true",
                           default=False, help="Configure sssd to use fixed server as primary IPA server")
    basic_group.add_option("-p", "--principal", dest="principal",
                           help="principal to use to join the IPA realm")
    basic_group.add_option("-w", "--password", dest="password", sensitive=True,
                           help="password to join the IPA realm (assumes bulk password unless principal is also set)")
    basic_group.add_option("-k", "--keytab", dest="keytab",
                           help="path to backed up keytab from previous enrollment")
    basic_group.add_option("-W", dest="prompt_password", action="store_true",
                           default=False,
                           help="Prompt for a password to join the IPA realm")
    basic_group.add_option("--mkhomedir", dest="mkhomedir",
                           action="store_true", default=False,
                           help="create home directories for users on their first login")
    basic_group.add_option("", "--hostname", dest="hostname",
                           help="The hostname of this machine (FQDN). If specified, the hostname will be set and "
                                "the system configuration will be updated to persist over reboot. "
                                "By default a nodename result from uname(2) is used.")
    basic_group.add_option("", "--force-join", dest="force_join",
                           action="store_true", default=False,
                           help="Force client enrollment even if already enrolled")
    basic_group.add_option("--ntp-server", dest="ntp_servers", action="append",
                           help="ntp server to use. This option can be used "
                                "multiple times")
    basic_group.add_option("-N", "--no-ntp", action="store_false",
                           help="do not configure ntp", default=True, dest="conf_ntp")
    basic_group.add_option("", "--force-ntpd", dest="force_ntpd",
                           action="store_true", default=False,
                           help="Stop and disable any time&date synchronization services besides ntpd")
    basic_group.add_option("--nisdomain", dest="nisdomain",
                           help="NIS domain name")
    basic_group.add_option("--no-nisdomain", action="store_true", default=False,
                           help="do not configure NIS domain name",
                           dest="no_nisdomain")
    basic_group.add_option("--ssh-trust-dns", dest="trust_sshfp", default=False, action="store_true",
                           help="configure OpenSSH client to trust DNS SSHFP records")
    basic_group.add_option("--no-ssh", dest="conf_ssh", default=True, action="store_false",
                           help="do not configure OpenSSH client")
    basic_group.add_option("--no-sshd", dest="conf_sshd", default=True, action="store_false",
                           help="do not configure OpenSSH server")
    basic_group.add_option("--no-sudo", dest="conf_sudo", default=True,
                           action="store_false",
                           help="do not configure SSSD as data source for sudo")
    basic_group.add_option("--no-dns-sshfp", dest="create_sshfp", default=True, action="store_false",
                           help="do not automatically create DNS SSHFP records")
    basic_group.add_option("--noac", dest="no_ac", default=False, action="store_true",
                           help="do not modify the nsswitch.conf and PAM configuration")
    basic_group.add_option("-f", "--force", dest="force", action="store_true",
                           default=False, help="force setting of LDAP/Kerberos conf")
    basic_group.add_option('--kinit-attempts', dest='kinit_attempts',
                           action='callback', type='int', default=5,
                           callback=kinit_attempts_callback,
                           help=("number of attempts to obtain host TGT"
                                 " (defaults to %default)."))
    basic_group.add_option("-d", "--debug", dest="debug", action="store_true",
                           default=False, help="print debugging information")
    basic_group.add_option("-U", "--unattended", dest="unattended",
                           action="store_true",
                           help="unattended (un)installation never prompts the user")
    basic_group.add_option("--ca-cert-file", dest="ca_cert_file",
                           type="string", action="callback", callback=validate_ca_cert_file_option,
                           help="load the CA certificate from this file")
    basic_group.add_option("--request-cert", dest="request_cert",
                           action="store_true", default=False,
                           help="request certificate for the machine")
    # --on-master is used in ipa-server-install and ipa-replica-install
    # only, it isn't meant to be used on clients.
    basic_group.add_option("--on-master", dest="on_master", action="store_true",
                           help=SUPPRESS_HELP, default=False)
    basic_group.add_option("--automount-location", dest="location",
                           help="Automount location")
    basic_group.add_option("--configure-firefox", dest="configure_firefox",
                           action="store_true", default=False,
                           help="configure Firefox")
    basic_group.add_option("--firefox-dir", dest="firefox_dir", default=None,
                           help="specify directory where Firefox is installed (for example: '/usr/lib/firefox')")
    basic_group.add_option("--ip-address", dest="ip_addresses", default=[],
                           action="append", help="Specify IP address that should be added to DNS."
                           " This option can be used multiple times")
    basic_group.add_option("--all-ip-addresses", dest="all_ip_addresses",
                           default=False, action="store_true", help="All routable IP"
                           " addresses configured on any inteface will be added to DNS")
    parser.add_option_group(basic_group)

    sssd_group = OptionGroup(parser, "SSSD options")
    sssd_group.add_option("--permit", dest="permit",
                          action="store_true", default=False,
                          help="disable access rules by default, permit all access.")
    sssd_group.add_option("", "--enable-dns-updates", dest="dns_updates",
                          action="store_true", default=False,
                          help="Configures the machine to attempt dns updates when the ip address changes.")
    sssd_group.add_option("--no-krb5-offline-passwords", dest="krb5_offline_passwords",
                          action="store_false", default=True,
                          help="Configure SSSD not to store user password when the server is offline")
    sssd_group.add_option("-S", "--no-sssd", dest="sssd",
                          action="store_false", default=True,
                          help="Do not configure the client to use SSSD for authentication")
    sssd_group.add_option("--preserve-sssd", dest="preserve_sssd",
                          action="store_true", default=False,
                          help="Preserve old SSSD configuration if possible")
    parser.add_option_group(sssd_group)

    uninstall_group = OptionGroup(parser, "uninstall options")
    uninstall_group.add_option("", "--uninstall", dest="uninstall", action="store_true",
                               default=False, help="uninstall an existing installation. The uninstall can "
                               "be run with --unattended option")
    parser.add_option_group(uninstall_group)

    options, args = parser.parse_args()
    safe_opts = parser.get_safe_opts(options)

    # Validate mutually dependent / exclusive option combinations.
    if (options.server and not options.domain):
        parser.error("--server cannot be used without providing --domain")

    if options.force_ntpd and not options.conf_ntp:
        parser.error("--force-ntpd cannot be used together with --no-ntp")

    if options.firefox_dir and not options.configure_firefox:
        parser.error("--firefox-dir cannot be used without --configure-firefox option")

    if options.no_nisdomain and options.nisdomain:
        parser.error("--no-nisdomain cannot be used together with --nisdomain")

    if options.ip_addresses:
        if options.dns_updates:
            parser.error("--ip-address cannot be used together with"
                         " --enable-dns-updates")

        if options.all_ip_addresses:
            parser.error("--ip-address cannot be used together with"
                         " --all-ip-addresses")

    return safe_opts, options
|
2007-08-16 18:00:16 -04:00
|
|
|
|
|
|
|
|
def logging_setup(options):
    """Set up logging to console and to the install/uninstall log file."""
    # Choose the log file based on the operation being performed.
    if options.uninstall:
        log_file = paths.IPACLIENT_UNINSTALL_LOG
    else:
        log_file = paths.IPACLIENT_INSTALL_LOG

    standard_logging_setup(
        filename=log_file, verbose=True, debug=options.debug,
        console_format='%(message)s')
|
|
|
|
|
|
2007-08-16 18:00:16 -04:00
|
|
|
|
2015-04-14 13:55:33 +02:00
|
|
|
def remove_file(filename):
    """
    Delete a file. If the file does not exist (ENOENT) do nothing.

    Otherwise log an error message and instruct the user to remove the
    offending file manually.

    :param filename: name of the file to be removed
    """
    # Local import so this fix is self-contained; the module-level import
    # block is a try/except guard that must not be disturbed.
    import errno

    try:
        os.remove(filename)
    except OSError as e:
        # A file that is already gone is not an error for our purposes.
        if e.errno == errno.ENOENT:
            return

        root_logger.error("Failed to remove file %s: %s", filename, e)
        root_logger.error('Please remove %s manually, as it can cause '
                          'subsequent installation to fail.', filename)
|
|
|
|
|
|
|
|
|
|
|
2011-09-13 00:11:24 +03:00
|
|
|
# Log a standardized error message for a failed service operation,
# e.g. log_service_error("sssd", "restart", exc).
def log_service_error(name, action, error):
    root_logger.error("%s failed to %s: %s", name, action, str(error))
|
2011-09-13 00:11:24 +03:00
|
|
|
|
2014-06-12 11:58:28 +02:00
|
|
|
def cert_summary(msg, certs, indent='    '):
    """
    Return a human-readable multi-line summary of certificates.

    :param msg: optional heading placed on the first line; falsy to omit
    :param certs: iterable of certificate objects exposing subject, issuer,
        valid_not_before_str and valid_not_after_str attributes
    :param indent: prefix string for every certificate detail line
    :returns: the formatted summary string (no trailing blank line)
    """
    # Collect lines in a list and join once instead of the quadratic
    # "s += ..." string-building pattern.
    lines = []
    if msg:
        lines.append(msg)
    for cert in certs:
        lines.append('%sSubject: %s' % (indent, cert.subject))
        lines.append('%sIssuer: %s' % (indent, cert.issuer))
        lines.append('%sValid From: %s' % (indent, cert.valid_not_before_str))
        lines.append('%sValid Until: %s' % (indent, cert.valid_not_after_str))
        # Blank separator after each certificate block
        lines.append('')

    return '\n'.join(lines)
|
|
|
|
|
|
|
|
|
|
def get_cert_path(cert_path):
    """
    Return the CA certificate path to use.

    A path given on the command line wins; failing that, an existing
    CACERT file is used; otherwise None is returned.
    """
    # Only fall back to CACERT when no explicit path was supplied and the
    # file actually exists on disk.
    if cert_path is None and os.path.exists(CACERT):
        return CACERT
    return cert_path
|
|
|
|
|
|
2013-11-07 17:18:32 +01:00
|
|
|
|
|
|
|
|
def save_state(service):
    """Record whether *service* was enabled/running before we change it."""
    was_enabled = service.is_enabled()
    was_running = service.is_running()

    # Only persist state when there is something to restore later.
    if was_enabled or was_running:
        statestore.backup_state(service.service_name, 'enabled', was_enabled)
        statestore.backup_state(service.service_name, 'running', was_running)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def restore_state(service):
    """Re-enable and/or restart *service* according to saved state."""
    was_enabled = statestore.restore_state(service.service_name, 'enabled')
    was_running = statestore.restore_state(service.service_name, 'running')

    if was_enabled:
        try:
            service.enable()
        except Exception:
            # Best effort: log and continue with the remaining restore steps.
            root_logger.warning(
                "Failed to configure automatic startup of the %s daemon",
                service.service_name
            )

    if was_running:
        try:
            service.start()
        except Exception:
            root_logger.warning(
                "Failed to restart the %s daemon",
                service.service_name
            )
|
|
|
|
|
|
|
|
|
|
|
2011-12-05 10:19:10 +01:00
|
|
|
def nssldap_exists():
    """Check whether nss_ldap or nss-pam-ldapd is installed.

    :returns: tuple (retval, files_found) where retval is True if at least
        one mandatory configuration file was found, and files_found maps
        each configuration function name to the list of files that exist.
    """
    files_to_check = [
        {'function': 'configure_ldap_conf',
         'mandatory': [paths.LDAP_CONF, paths.NSS_LDAP_CONF,
                       paths.LIBNSS_LDAP_CONF],
         'optional': [paths.PAM_LDAP_CONF]},
        {'function': 'configure_nslcd_conf',
         'mandatory': [paths.NSLCD_CONF]},
    ]
    files_found = {}
    retval = False

    for function in files_to_check:
        found = []
        for file_type in ('mandatory', 'optional'):
            # 'optional' may be absent for an entry; dict.get replaces the
            # previous try/except KeyError construct.
            for filename in function.get(file_type, ()):
                if file_exists(filename):
                    found.append(filename)
                    # Only mandatory files make the overall check succeed.
                    if file_type == 'mandatory':
                        retval = True
        files_found[function['function']] = found

    return (retval, files_found)
|
|
|
|
|
|
2012-08-17 08:56:45 -04:00
|
|
|
def delete_ipa_domain():
    """Helper for uninstall: delete the IPA domain from sssd.conf.

    Scans the active SSSD domains for the first one whose id_provider is
    'ipa' and removes it from the configuration.  Logs a warning when no
    IPA domain is found or when sssd.conf is not accessible.
    """
    # NOTE(review): the previous version created an unused
    # `services.service('sssd')` object here; removed as dead code.
    try:
        sssdconfig = SSSDConfig.SSSDConfig()
        sssdconfig.import_config()
        domains = sssdconfig.list_active_domains()

        ipa_domain_name = None

        for name in domains:
            domain = sssdconfig.get_domain(name)
            try:
                provider = domain.get_option('id_provider')
                if provider == "ipa":
                    ipa_domain_name = name
                    break
            except SSSDConfig.NoOptionError:
                # Domain without an id_provider cannot be the IPA one.
                continue

        if ipa_domain_name is not None:
            sssdconfig.delete_domain(ipa_domain_name)
            sssdconfig.write()
        else:
            root_logger.warning("IPA domain could not be found in "
                "/etc/sssd/sssd.conf and therefore not deleted")
    except IOError:
        root_logger.warning("IPA domain could not be deleted. "
            "No access to the /etc/sssd/sssd.conf file.")
|
|
|
|
|
|
2013-02-19 17:59:50 +01:00
|
|
|
def is_ipa_client_installed(on_master=False):
    """
    Consider IPA client not installed if nothing is backed up
    and default.conf file does not exist. If on_master is set to True,
    the existence of default.conf file is not taken into consideration,
    since it has been already created by ipa-server-install.
    """
    has_backups = fstore.has_files()
    # On a master, default.conf was written by ipa-server-install and
    # does not indicate a configured client.
    has_default_conf = (not on_master
                        and os.path.exists(paths.IPA_DEFAULT_CONF))

    return has_backups or has_default_conf
|
|
|
|
|
|
2013-11-21 13:09:28 +01:00
|
|
|
def configure_nsswitch_database(fstore, database, services, preserve=True,
                                append=True, default_value=()):
    """
    Edits the specified nsswitch.conf database (e.g. passwd, group, sudoers)
    to use the specified service(s).

    Arguments:
        fstore - FileStore to backup the nsswitch.conf
        database - database configuration that should be ammended, e.g 'sudoers'
        services - list of services that should be added, e.g. ['sss']
        preserve - if True, the already configured services will be preserved

    The next arguments modify the behaviour if preserve=True:
        append - if True, the services will be appended, if False, prepended
        default_value - list of services that are considered as default (if
                        the database is not mentioned in nsswitch.conf), e.g.
                        ['files']
    """
    # NOTE(review): the `services` parameter shadows the module-level
    # `services` import from ipaplatform; kept as-is since renaming it
    # would break keyword-argument callers.

    # Backup the original version of nsswitch.conf, we're going to edit it now
    if not fstore.has_file(paths.NSSWITCH_CONF):
        fstore.backup_file(paths.NSSWITCH_CONF)

    conf = ipaclient.ipachangeconf.IPAChangeConf("IPA Installer")
    conf.setOptionAssignment(':')

    if preserve:
        # Read the existing configuration
        with open(paths.NSSWITCH_CONF, 'r') as f:
            opts = conf.parse(f)
            raw_database_entry = conf.findOpts(opts, 'option', database)[1]

        # Detect the list of already configured services
        if not raw_database_entry:
            # If there is no database entry, database is not present in
            # the nsswitch.conf. Set the list of services to the
            # default list, if passed.
            configured_services = list(default_value)
        else:
            configured_services = raw_database_entry['value'].strip().split()

        # Make sure no service is added if already mentioned in the list
        added_services = [s for s in services
                          if s not in configured_services]

        # Prepend / append the list of new services
        if append:
            new_value = ' ' + ' '.join(configured_services + added_services)
        else:
            new_value = ' ' + ' '.join(added_services + configured_services)

    else:
        # Preserve not set, let's rewrite existing configuration
        new_value = ' ' + ' '.join(services)

    # Set new services as sources for database
    opts = [{'name': database,
             'type': 'option',
             'action': 'set',
             'value': new_value
             },
            {'name': 'empty',
             'type': 'empty'
             }]

    conf.changeConf(paths.NSSWITCH_CONF, opts)
    # Lazy %-style logging args instead of eager string formatting.
    root_logger.info("Configured %s in %s", database, paths.NSSWITCH_CONF)
|
2013-11-21 13:09:28 +01:00
|
|
|
|
|
|
|
|
|
2012-06-08 09:36:38 -04:00
|
|
|
def uninstall(options, env):
    """Undo the client configuration performed by ipa-client-install.

    Unenrolls the host, removes certificates and Kerberos principals,
    restores backed-up configuration files and system state, and handles
    the sssd.conf merge/restore cases.

    :param options: parsed command-line options (on_master, debug,
        unattended, ...)
    :param env: environment dict passed to the ipa-join subprocess
    :returns: 0 on success, 1 if some state could not be restored, or one
        of CLIENT_NOT_CONFIGURED / CLIENT_INSTALL_ERROR /
        CLIENT_UNINSTALL_ERROR on hard failures.
    """

    if not is_ipa_client_installed():
        root_logger.error("IPA client is not configured on this system.")
        return CLIENT_NOT_CONFIGURED

    server_fstore = sysrestore.FileStore(paths.SYSRESTORE)
    if server_fstore.has_files() and not options.on_master:
        root_logger.error(
            "IPA client is configured as a part of IPA server on this system.")
        root_logger.info("Refer to ipa-server-install for uninstallation.")
        return CLIENT_NOT_CONFIGURED

    try:
        run(["ipa-client-automount", "--uninstall", "--debug"])
    except Exception as e:
        root_logger.error(
            "Unconfigured automount client failed: %s", str(e))

    # Reload the state as automount unconfigure may have modified it
    fstore._load()
    statestore._load()

    hostname = None
    ipa_domain = None
    was_sssd_configured = False
    try:
        sssdconfig = SSSDConfig.SSSDConfig()
        sssdconfig.import_config()
        domains = sssdconfig.list_active_domains()
        all_domains = sssdconfig.list_domains()

        # we consider all the domains, because handling sssd.conf
        # during uninstall is dependant on was_sssd_configured flag
        # so the user does not lose info about inactive domains
        if len(all_domains) > 1:
            # There was more than IPA domain configured
            was_sssd_configured = True
        for name in domains:
            domain = sssdconfig.get_domain(name)
            try:
                provider = domain.get_option('id_provider')
            except SSSDConfig.NoOptionError:
                continue
            if provider == "ipa":
                try:
                    hostname = domain.get_option('ipa_hostname')
                except SSSDConfig.NoOptionError:
                    continue
                try:
                    ipa_domain = domain.get_option('ipa_domain')
                except SSSDConfig.NoOptionError:
                    pass
    except Exception:
        # We were unable to read existing SSSD config. This might mean few things:
        # - sssd wasn't installed
        # - sssd was removed after install and before uninstall
        # - there are no active domains
        # in both cases we cannot continue with SSSD
        pass

    if hostname is None:
        hostname = socket.getfqdn()

    ipa_db = certdb.NSSDatabase(paths.IPA_NSSDB_DIR)
    sys_db = certdb.NSSDatabase(paths.NSS_DB_DIR)

    cmonger = services.knownservices.certmonger
    if ipa_db.has_nickname('Local IPA host'):
        try:
            certmonger.stop_tracking(paths.IPA_NSSDB_DIR,
                                     nickname='Local IPA host')
        except RuntimeError as e:
            root_logger.error("%s failed to stop tracking certificate: %s",
                              cmonger.service_name, e)

    client_nss_nickname = 'IPA Machine Certificate - %s' % hostname
    if sys_db.has_nickname(client_nss_nickname):
        try:
            certmonger.stop_tracking(paths.NSS_DB_DIR,
                                     nickname=client_nss_nickname)
        except RuntimeError as e:
            root_logger.error("%s failed to stop tracking certificate: %s",
                              cmonger.service_name, e)

    # Remove our host cert and CA cert
    try:
        ipa_certs = ipa_db.list_certs()
    except CalledProcessError as e:
        root_logger.error(
            "Failed to list certificates in %s: %s", ipa_db.secdir, e)
        ipa_certs = []

    for filename in (os.path.join(ipa_db.secdir, 'cert8.db'),
                     os.path.join(ipa_db.secdir, 'key3.db'),
                     os.path.join(ipa_db.secdir, 'secmod.db'),
                     os.path.join(ipa_db.secdir, 'pwdfile.txt')):
        remove_file(filename)

    for nickname, trust_flags in ipa_certs:
        # A nickname may appear multiple times in the system DB; keep
        # deleting until all copies are gone or deletion fails.
        while sys_db.has_nickname(nickname):
            try:
                sys_db.delete_cert(nickname)
            except Exception as e:
                root_logger.error("Failed to remove %s from %s: %s",
                                  nickname, sys_db.secdir, e)
                break

    # Remove any special principal names we added to the IPA CA helper
    certmonger.remove_principal_from_cas()

    try:
        cmonger.stop()
    except Exception as e:
        log_service_error(cmonger.service_name, 'stop', e)

    try:
        cmonger.disable()
    except Exception as e:
        root_logger.error(
            "Failed to disable automatic startup of the %s service: %s",
            cmonger.service_name, str(e))

    if not options.on_master and os.path.exists(paths.IPA_DEFAULT_CONF):
        root_logger.info("Unenrolling client from IPA server")
        join_args = [paths.SBIN_IPA_JOIN, "--unenroll", "-h", hostname]
        if options.debug:
            join_args.append("-d")
            env['XMLRPC_TRACE_CURL'] = 'yes'
        (stdout, stderr, returncode) = run(join_args, raiseonerr=False, env=env)
        if returncode != 0:
            root_logger.error("Unenrolling host failed: %s", stderr)

    if os.path.exists(paths.IPA_DEFAULT_CONF):
        root_logger.info(
            "Removing Kerberos service principals from /etc/krb5.keytab")
        try:
            parser = RawConfigParser()
            # Use a context manager so the handle is closed even if
            # parsing fails (the original open/readfp/close leaked it).
            with open(paths.IPA_DEFAULT_CONF, 'r') as fp:
                parser.readfp(fp)
            realm = parser.get('global', 'realm')
            run([paths.IPA_RMKEYTAB, "-k", paths.KRB5_KEYTAB, "-r", realm])
        except Exception as e:
            root_logger.error(
                "Failed to remove Kerberos service principals: %s", str(e))

    root_logger.info("Disabling client Kerberos and LDAP configurations")
    was_sssd_installed = False
    was_sshd_configured = False
    if fstore.has_files():
        was_sssd_installed = fstore.has_file(paths.SSSD_CONF)

        sshd_config = os.path.join(services.knownservices.sshd.get_config_dir(), "sshd_config")
        was_sshd_configured = fstore.has_file(sshd_config)
    try:
        tasks.restore_pre_ipa_client_configuration(fstore,
                                                   statestore,
                                                   was_sssd_installed,
                                                   was_sssd_configured)
    except Exception as e:
        root_logger.error(
            "Failed to remove krb5/LDAP configuration: %s", str(e))
        return CLIENT_INSTALL_ERROR

    # Clean up the SSSD cache before SSSD service is stopped or restarted
    remove_file(paths.SSSD_MC_GROUP)
    remove_file(paths.SSSD_MC_PASSWD)

    if ipa_domain:
        sssd_domain_ldb = "cache_" + ipa_domain + ".ldb"
        sssd_ldb_file = os.path.join(paths.SSSD_DB, sssd_domain_ldb)
        remove_file(sssd_ldb_file)

        sssd_domain_ccache = "ccache_" + ipa_domain.upper()
        sssd_ccache_file = os.path.join(paths.SSSD_DB, sssd_domain_ccache)
        remove_file(sssd_ccache_file)

    # Next if-elif-elif construction deals with sssd.conf file.
    # Old pre-IPA domains are preserved due merging the old sssd.conf
    # during the installation of ipa-client but any new domains are
    # only present in sssd.conf now, so we don't want to delete them
    # by rewriting sssd.conf file. IPA domain is removed gracefully.

    # SSSD was installed before our installation and other non-IPA domains
    # found, restore backed up sssd.conf to sssd.conf.bkp and remove IPA
    # domain from the current sssd.conf
    if was_sssd_installed and was_sssd_configured:
        root_logger.info(
            "The original configuration of SSSD included other domains than " +
            "the IPA-based one.")

        delete_ipa_domain()

        restored = False
        try:
            restored = fstore.restore_file(paths.SSSD_CONF,paths.SSSD_CONF_BKP)
        except OSError:
            root_logger.debug("Error while restoring pre-IPA /etc/sssd/sssd.conf.")

        if restored:
            root_logger.info("Original pre-IPA SSSD configuration file was "
                             "restored to /etc/sssd/sssd.conf.bkp.")

        root_logger.info("IPA domain removed from current one, " +
                         "restarting SSSD service")
        sssd = services.service('sssd')
        try:
            sssd.restart()
        except CalledProcessError:
            root_logger.warning("SSSD service restart was unsuccessful.")

    # SSSD was not installed before our installation, but other domains found,
    # delete IPA domain, but leave other domains intact
    elif not was_sssd_installed and was_sssd_configured:
        delete_ipa_domain()
        root_logger.info("Other domains than IPA domain found, " +
                         "IPA domain was removed from /etc/sssd/sssd.conf.")

        sssd = services.service('sssd')
        try:
            sssd.restart()
        except CalledProcessError:
            root_logger.warning("SSSD service restart was unsuccessful.")

    # SSSD was not installed before our installation, and no other domains
    # than IPA are configured in sssd.conf - make sure config file is removed
    elif not was_sssd_installed and not was_sssd_configured:
        try:
            os.rename(paths.SSSD_CONF,paths.SSSD_CONF_DELETED)
        except OSError:
            root_logger.debug("Error while moving /etc/sssd/sssd.conf to %s" %
                              paths.SSSD_CONF_DELETED)

        root_logger.info("Redundant SSSD configuration file " +
                         "/etc/sssd/sssd.conf was moved to /etc/sssd/sssd.conf.deleted")

        sssd = services.service('sssd')
        try:
            sssd.stop()
        except CalledProcessError:
            root_logger.warning("SSSD service could not be stopped")

        try:
            sssd.disable()
        except CalledProcessError as e:
            root_logger.warning(
                "Failed to disable automatic startup of the SSSD daemon: %s", e)

    if fstore.has_files():
        root_logger.info("Restoring client configuration files")
        tasks.restore_network_configuration(fstore, statestore)
        fstore.restore_all_files()

    ipautil.restore_hostname(statestore)
    unconfigure_nisdomain()

    nscd = services.knownservices.nscd
    nslcd = services.knownservices.nslcd

    for service in (nscd, nslcd):
        if service.is_installed():
            restore_state(service)
        else:
            # this is an optional service, just log
            root_logger.info(
                "%s daemon is not installed, skip configuration",
                service.service_name
            )

    ntp_configured = statestore.has_state('ntp')
    if ntp_configured:
        ntp_enabled = statestore.restore_state('ntp', 'enabled')
        ntp_step_tickers = statestore.restore_state('ntp', 'step-tickers')
        restored = False

        try:
            # Restore might fail due to file missing in backup
            # the reason for it might be that freeipa-client was updated
            # to this version but not unenrolled/enrolled again
            # In such case it is OK to fail
            restored = fstore.restore_file(paths.NTP_CONF)
            restored |= fstore.restore_file(paths.SYSCONFIG_NTPD)
            if ntp_step_tickers:
                restored |= fstore.restore_file(paths.NTP_STEP_TICKERS)
        except Exception:
            pass

        if not ntp_enabled:
            services.knownservices.ntpd.stop()
            services.knownservices.ntpd.disable()
        else:
            if restored:
                services.knownservices.ntpd.restart()

    try:
        ipaclient.ntpconf.restore_forced_ntpd(statestore)
    except CalledProcessError as e:
        root_logger.error('Failed to start chronyd: %s', e)

    if was_sshd_configured and services.knownservices.sshd.is_running():
        services.knownservices.sshd.restart()

    # Remove the Firefox configuration
    if statestore.has_state('firefox'):
        root_logger.info("Removing Firefox configuration.")
        preferences_fname = statestore.restore_state('firefox', 'preferences_fname')
        if preferences_fname is not None:
            if file_exists(preferences_fname):
                try:
                    os.remove(preferences_fname)
                except Exception as e:
                    # BUGFIX: the original used
                    #   "'%s' ...: %s." % preferences_fname, str(e)
                    # which binds only one operand to two %s placeholders
                    # and raises TypeError inside this handler. Pass both
                    # values as lazy logging arguments instead.
                    root_logger.warning("'%s' could not be removed: %s.",
                                        preferences_fname, str(e))
                    root_logger.warning("Please remove file '%s' manually." % preferences_fname)

    rv = 0

    if fstore.has_files():
        root_logger.error('Some files have not been restored, see %s' %
                          paths.SYSRESTORE_INDEX)
    has_state = False
    for module in statestore.modules.keys():
        root_logger.error('Some installation state for %s has not been '
                          'restored, see /var/lib/ipa/sysrestore/sysrestore.state',
                          module)
        has_state = True
        rv = 1

    if has_state:
        root_logger.warning(
            'Some installation state has not been restored.\n'
            'This may cause re-installation to fail.\n'
            'It should be safe to remove /var/lib/ipa-client/sysrestore.state '
            'but it may\n mean your system hasn\'t been restored '
            'to its pre-installation state.')

    # Remove the IPA configuration file
    remove_file(paths.IPA_DEFAULT_CONF)

    # Remove the CA cert from the systemwide certificate store
    tasks.remove_ca_certs_from_systemwide_ca_store()

    # Remove the CA cert
    remove_file(CACERT)

    root_logger.info("Client uninstall complete.")

    # The next block of code prompts for reboot, therefore all uninstall
    # logic has to be done before

    if not options.unattended:
        root_logger.info(
            "The original nsswitch.conf configuration has been restored.")
        root_logger.info(
            "You may need to restart services or reboot the machine.")
        if not options.on_master:
            if user_input("Do you want to reboot the machine?", False):
                try:
                    run([paths.SBIN_REBOOT])
                except Exception as e:
                    # BUGFIX: fixed "exceute" typo in the error message.
                    root_logger.error(
                        "Reboot command failed to execute: %s", str(e))
                    return CLIENT_UNINSTALL_ERROR

    # IMPORTANT: Do not put any client uninstall logic after the block above

    return rv
|
2011-08-29 17:44:02 -04:00
|
|
|
|
2014-08-27 16:02:35 +02:00
|
|
|
def configure_ipa_conf(fstore, cli_basedn, cli_realm, cli_domain, cli_server, hostname):
    """Generate /etc/ipa/default.conf for this client.

    The previous file (if any) is backed up through *fstore* so that
    uninstall can restore it. Always returns 0.
    """
    ipaconf = ipaclient.ipachangeconf.IPAChangeConf("IPA Installer")
    ipaconf.setOptionAssignment(" = ")
    ipaconf.setSectionNameDelimiters(("[", "]"))

    def opt(name, value):
        # Shorthand for a single key = value entry in ipachangeconf format.
        return {'name': name, 'type': 'option', 'value': value}

    xmlrpc_uri = 'https://%s/ipa/xml' % ipautil.format_netloc(cli_server[0])

    # Contents of the [global] section.
    global_section = [
        opt('basedn', cli_basedn),
        opt('realm', cli_realm),
        opt('domain', cli_domain),
        opt('server', cli_server[0]),
        opt('host', hostname),
        opt('xmlrpc_uri', xmlrpc_uri),
        opt('enable_ra', 'True'),
    ]

    opts = [
        {'name': 'comment', 'type': 'comment',
         'value': 'File modified by ipa-client-install'},
        {'name': 'empty', 'type': 'empty'},
        {'name': 'global', 'type': 'section', 'value': global_section},
        {'name': 'empty', 'type': 'empty'},
    ]

    target_fname = paths.IPA_DEFAULT_CONF
    fstore.backup_file(target_fname)
    ipaconf.newConf(target_fname, opts)
    # World-readable config; clients (and non-root tools) must read it.
    os.chmod(target_fname, 0o644)

    return 0
|
|
|
|
|
|
2013-03-28 17:41:05 +01:00
|
|
|
|
|
|
|
|
def disable_ra():
    """Set the enable_ra option in /etc/ipa/default.conf to False

    Note that api.env will retain the old value (it is readonly).
    """
    parser = RawConfigParser()
    parser.read(paths.IPA_DEFAULT_CONF)
    parser.set('global', 'enable_ra', 'False')
    # Use a context manager so the file handle is closed even when
    # parser.write() raises (the bare open/write/close leaked it on error).
    with open(paths.IPA_DEFAULT_CONF, 'w') as fp:
        parser.write(fp)
|
|
|
|
|
|
|
|
|
|
|
2011-12-05 10:19:10 +01:00
|
|
|
def configure_ldap_conf(fstore, cli_basedn, cli_realm, cli_domain, cli_server, dnsok, options, files):
|
2009-11-19 14:14:42 -05:00
|
|
|
ldapconf = ipaclient.ipachangeconf.IPAChangeConf("IPA Installer")
|
|
|
|
|
ldapconf.setOptionAssignment(" ")
|
|
|
|
|
|
|
|
|
|
opts = [{'name':'comment', 'type':'comment', 'value':'File modified by ipa-client-install'},
|
|
|
|
|
{'name':'empty', 'type':'empty'},
|
|
|
|
|
{'name':'ldap_version', 'type':'option', 'value':'3'},
|
|
|
|
|
{'name':'base', 'type':'option', 'value':cli_basedn},
|
|
|
|
|
{'name':'empty', 'type':'empty'},
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equalty comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 07:36:35 -04:00
|
|
|
{'name':'nss_base_passwd', 'type':'option', 'value':str(DN(('cn', 'users'), ('cn', 'accounts'), cli_basedn))+'?sub'},
|
|
|
|
|
{'name':'nss_base_group', 'type':'option', 'value':str(DN(('cn', 'groups'), ('cn', 'accounts'), cli_basedn))+'?sub'},
|
2009-11-19 14:14:42 -05:00
|
|
|
{'name':'nss_schema', 'type':'option', 'value':'rfc2307bis'},
|
|
|
|
|
{'name':'nss_map_attribute', 'type':'option', 'value':'uniqueMember member'},
|
|
|
|
|
{'name':'nss_initgroups_ignoreusers', 'type':'option', 'value':'root,dirsrv'},
|
|
|
|
|
{'name':'empty', 'type':'empty'},
|
|
|
|
|
{'name':'nss_reconnect_maxsleeptime', 'type':'option', 'value':'8'},
|
|
|
|
|
{'name':'nss_reconnect_sleeptime', 'type':'option', 'value':'1'},
|
|
|
|
|
{'name':'bind_timelimit', 'type':'option', 'value':'5'},
|
|
|
|
|
{'name':'timelimit', 'type':'option', 'value':'15'},
|
|
|
|
|
{'name':'empty', 'type':'empty'}]
|
|
|
|
|
if not dnsok or options.force or options.on_master:
|
|
|
|
|
if options.on_master:
|
|
|
|
|
opts.append({'name':'uri', 'type':'option', 'value':'ldap://localhost'})
|
|
|
|
|
else:
|
2012-07-03 17:37:22 -04:00
|
|
|
opts.append({'name':'uri', 'type':'option', 'value':'ldap://'+ipautil.format_netloc(cli_server[0])})
|
2009-11-19 14:14:42 -05:00
|
|
|
else:
|
|
|
|
|
opts.append({'name':'nss_srv_domain', 'type':'option', 'value':cli_domain})
|
|
|
|
|
|
|
|
|
|
opts.append({'name':'empty', 'type':'empty'})
|
2010-08-04 10:09:35 -04:00
|
|
|
|
|
|
|
|
# Depending on the release and distribution this may exist in any
|
|
|
|
|
# number of different file names, update what we find
|
2011-12-05 10:19:10 +01:00
|
|
|
for filename in files:
|
|
|
|
|
try:
|
|
|
|
|
fstore.backup_file(filename)
|
|
|
|
|
ldapconf.newConf(filename, opts)
|
2015-07-30 16:49:29 +02:00
|
|
|
except Exception as e:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Creation of %s failed: %s", filename, str(e))
|
2011-12-05 10:19:10 +01:00
|
|
|
return (1, 'LDAP', filename)
|
2010-08-04 10:09:35 -04:00
|
|
|
|
2011-10-14 11:29:35 -04:00
|
|
|
if files:
|
|
|
|
|
return (0, 'LDAP', ', '.join(files))
|
2012-06-08 09:36:38 -04:00
|
|
|
|
|
|
|
|
return 0, None, None
|
2010-08-04 10:09:35 -04:00
|
|
|
|
2011-12-05 10:19:10 +01:00
|
|
|
def configure_nslcd_conf(fstore, cli_basedn, cli_realm, cli_domain, cli_server, dnsok, options, files):
    """Write nslcd configuration pointing at the IPA LDAP tree.

    Backs up and rewrites every file in *files*, then restarts and enables
    the nslcd daemon if it is installed.  Returns a (status, name, files)
    tuple: status 0 on success, 1 on failure to write a config file.
    """
    nslcdconf = ipaclient.ipachangeconf.IPAChangeConf("IPA Installer")
    nslcdconf.setOptionAssignment(" ")

    opts = [
        {'name': 'comment', 'type': 'comment',
         'value': 'File modified by ipa-client-install'},
        {'name': 'empty', 'type': 'empty'},
        {'name': 'ldap_version', 'type': 'option', 'value': '3'},
        {'name': 'base', 'type': 'option', 'value': cli_basedn},
        {'name': 'empty', 'type': 'empty'},
        {'name': 'base passwd', 'type': 'option',
         'value': str(DN(('cn', 'users'), ('cn', 'accounts'), cli_basedn))},
        {'name': 'base group', 'type': 'option',
         'value': str(DN(('cn', 'groups'), ('cn', 'accounts'), cli_basedn))},
        {'name': 'timelimit', 'type': 'option', 'value': '15'},
        {'name': 'empty', 'type': 'empty'},
    ]

    # Pin a concrete server URI when DNS discovery is unusable (or the
    # caller forces it); otherwise let nslcd discover servers via DNS.
    if not dnsok or options.force or options.on_master:
        if options.on_master:
            opts.append({'name': 'uri', 'type': 'option',
                         'value': 'ldap://localhost'})
        else:
            opts.append({'name': 'uri', 'type': 'option',
                         'value': 'ldap://' + ipautil.format_netloc(cli_server[0])})
    else:
        opts.append({'name': 'uri', 'type': 'option', 'value': 'DNS'})

    opts.append({'name': 'empty', 'type': 'empty'})

    for filename in files:
        try:
            fstore.backup_file(filename)
            nslcdconf.newConf(filename, opts)
        except Exception as e:
            root_logger.error("Creation of %s failed: %s", filename, str(e))
            return (1, None, None)

    nslcd = services.knownservices.nslcd
    if nslcd.is_installed():
        try:
            nslcd.restart()
        except Exception as e:
            log_service_error(nslcd.service_name, 'restart', e)

        try:
            nslcd.enable()
        except Exception as e:
            root_logger.error(
                "Failed to enable automatic startup of the %s daemon: %s",
                nslcd.service_name, str(e))
    else:
        # Nothing to restart/enable; report success with no service name.
        root_logger.debug("%s daemon is not installed, skip configuration",
                          nslcd.service_name)
        return (0, None, None)

    return (0, 'NSLCD', ', '.join(files))
def configure_openldap_conf(fstore, cli_basedn, cli_server):
    """Add URI, BASE and TLS_CACERT to the system-wide OpenLDAP client config.

    Existing settings are never overwritten ('addifnotset'); this step is
    optional, so every failure is logged and reported by returning False.
    Returns True when the file was updated successfully.
    """
    ldapconf = ipaclient.ipachangeconf.IPAChangeConf("IPA Installer")
    ldapconf.setOptionAssignment((" ", "\t"))

    opts = [
        {'name': 'comment', 'type': 'comment',
         'value': ' File modified by ipa-client-install'},
        {'name': 'empty', 'type': 'empty'},
        {'name': 'comment', 'type': 'comment',
         'value': ' We do not want to break your existing configuration, '
                  'hence:'},
        # this needs to be kept updated if we change more options
        {'name': 'comment', 'type': 'comment',
         'value': ' URI, BASE and TLS_CACERT have been added if they '
                  'were not set.'},
        {'name': 'comment', 'type': 'comment',
         'value': ' In case any of them were set, a comment with '
                  'trailing note'},
        {'name': 'comment', 'type': 'comment',
         'value': ' "# modified by IPA" note has been inserted.'},
        {'name': 'comment', 'type': 'comment',
         'value': ' To use IPA server with openLDAP tools, please comment '
                  'out your'},
        {'name': 'comment', 'type': 'comment',
         'value': ' existing configuration for these options and '
                  'uncomment the'},
        {'name': 'comment', 'type': 'comment',
         'value': ' corresponding lines generated by IPA.'},
        {'name': 'empty', 'type': 'empty'},
        {'name': 'empty', 'type': 'empty'},
        {'action': 'addifnotset', 'name': 'URI', 'type': 'option',
         'value': 'ldaps://' + cli_server[0]},
        {'action': 'addifnotset', 'name': 'BASE', 'type': 'option',
         'value': str(cli_basedn)},
        {'action': 'addifnotset', 'name': 'TLS_CACERT', 'type': 'option',
         'value': CACERT},
    ]

    target_fname = paths.OPENLDAP_LDAP_CONF
    fstore.backup_file(target_fname)

    error_msg = "Configuring {path} failed with: {err}"

    try:
        ldapconf.changeConf(target_fname, opts)
    except SyntaxError as e:
        root_logger.info("Could not parse {path}".format(path=target_fname))
        root_logger.debug(error_msg.format(path=target_fname, err=str(e)))
        return False
    except IOError as e:
        root_logger.info("{path} does not exist.".format(path=target_fname))
        root_logger.debug(error_msg.format(path=target_fname, err=str(e)))
        return False
    except Exception as e:  # we do not want to fail in an optional step
        root_logger.debug(error_msg.format(path=target_fname, err=str(e)))
        return False

    os.chmod(target_fname, 0o644)
    return True
def hardcode_ldap_server(cli_server):
    """
    DNS Discovery didn't return a valid IPA server, hardcode a value into
    the file instead.

    Overwrites the 'uri' option in paths.LDAP_CONF with the first server
    from *cli_server*.  Does nothing if the file does not exist.
    Errors raised by changeConf() are deliberately left for the caller.
    """
    if not file_exists(paths.LDAP_CONF):
        return

    ldapconf = ipaclient.ipachangeconf.IPAChangeConf("IPA Installer")
    ldapconf.setOptionAssignment(" ")

    opts = [
        {'name': 'uri', 'type': 'option', 'action': 'set',
         'value': 'ldap://' + ipautil.format_netloc(cli_server[0])},
        {'name': 'empty', 'type': 'empty'},
    ]

    # Errors raised by this should be caught by the caller
    ldapconf.changeConf(paths.LDAP_CONF, opts)
    # Fix: log the path actually written (paths.LDAP_CONF) instead of a
    # hardcoded "/etc/ldap.conf" literal, and use lazy %-style arguments
    # throughout instead of eager string concatenation.
    root_logger.info("Changed configuration of %s to use "
                     "hardcoded server name: %s",
                     paths.LDAP_CONF, cli_server[0])
    return
def configure_krb5_conf(cli_realm, cli_domain, cli_server, cli_kdc, dnsok,
                        options, filename, client_domain):
    """Generate the Kerberos client configuration and write it to *filename*.

    Builds [libdefaults], [realms] and [domain_realm] sections; explicit
    KDC/admin-server entries are emitted only when DNS SRV discovery cannot
    be relied upon.  The file is written world-readable (0644).  Returns 0.
    """
    krbconf = ipaclient.ipachangeconf.IPAChangeConf("IPA Installer")
    krbconf.setOptionAssignment((" = ", " "))
    krbconf.setSectionNameDelimiters(("[", "]"))
    krbconf.setSubSectionDelimiters(("{", "}"))
    krbconf.setIndent(("", "  ", "    "))

    opts = [
        {'name': 'comment', 'type': 'comment',
         'value': 'File modified by ipa-client-install'},
        {'name': 'empty', 'type': 'empty'},
    ]

    # SSSD include dir
    if options.sssd:
        opts.append({'name': 'includedir', 'type': 'option',
                     'value': paths.SSSD_PUBCONF_KRB5_INCLUDE_D_DIR,
                     'delim': ' '})
        opts.append({'name': 'empty', 'type': 'empty'})

    # [libdefaults]
    libopts = [{'name': 'default_realm', 'type': 'option', 'value': cli_realm}]
    if not dnsok or not cli_kdc or options.force:
        libopts.append({'name': 'dns_lookup_realm', 'type': 'option',
                        'value': 'false'})
        libopts.append({'name': 'dns_lookup_kdc', 'type': 'option',
                        'value': 'false'})
    else:
        libopts.append({'name': 'dns_lookup_realm', 'type': 'option',
                        'value': 'true'})
        libopts.append({'name': 'dns_lookup_kdc', 'type': 'option',
                        'value': 'true'})
    libopts.append({'name': 'rdns', 'type': 'option', 'value': 'false'})
    libopts.append({'name': 'ticket_lifetime', 'type': 'option',
                    'value': '24h'})
    libopts.append({'name': 'forwardable', 'type': 'option', 'value': 'yes'})
    libopts.append({'name': 'udp_preference_limit', 'type': 'option',
                    'value': '0'})

    # Configure KEYRING CCACHE if supported
    if kernel_keyring.is_persistent_keyring_supported():
        root_logger.debug("Enabling persistent keyring CCACHE")
        libopts.append({'name': 'default_ccache_name', 'type': 'option',
                        'value': 'KEYRING:persistent:%{uid}'})

    opts.append({'name': 'libdefaults', 'type': 'section', 'value': libopts})
    opts.append({'name': 'empty', 'type': 'empty'})

    # The following are necessary only if DNS discovery does not work.
    kropts = []
    if not dnsok or not cli_kdc or options.force:
        # [realms]
        for server in cli_server:
            kropts.append({'name': 'kdc', 'type': 'option',
                           'value': ipautil.format_netloc(server, 88)})
            kropts.append({'name': 'master_kdc', 'type': 'option',
                           'value': ipautil.format_netloc(server, 88)})
            kropts.append({'name': 'admin_server', 'type': 'option',
                           'value': ipautil.format_netloc(server, 749)})
        kropts.append({'name': 'default_domain', 'type': 'option',
                       'value': cli_domain})
    kropts.append({'name': 'pkinit_anchors', 'type': 'option',
                   'value': 'FILE:%s' % CACERT})
    ropts = [{'name': cli_realm, 'type': 'subsection', 'value': kropts}]

    opts.append({'name': 'realms', 'type': 'section', 'value': ropts})
    opts.append({'name': 'empty', 'type': 'empty'})

    # [domain_realm]
    dropts = [
        {'name': '.' + cli_domain, 'type': 'option', 'value': cli_realm},
        {'name': cli_domain, 'type': 'option', 'value': cli_realm},
    ]

    # Add client domain mapping if different from server domain.
    if cli_domain != client_domain:
        dropts.append({'name': '.' + client_domain, 'type': 'option',
                       'value': cli_realm})
        dropts.append({'name': client_domain, 'type': 'option',
                       'value': cli_realm})

    opts.append({'name': 'domain_realm', 'type': 'section', 'value': dropts})
    opts.append({'name': 'empty', 'type': 'empty'})

    root_logger.debug("Writing Kerberos configuration to %s:", filename)
    root_logger.debug("%s", krbconf.dump(opts))

    krbconf.newConf(filename, opts)
    os.chmod(filename, 0o644)

    return 0
def configure_certmonger(fstore, subject_base, cli_realm, hostname, options,
                         ca_enabled):
    """Request a host certificate via certmonger, if the user asked for one.

    No-op unless --request-cert was given; also bails out (with a warning)
    when the server has no RA/CA configured.  Failures to enable or talk to
    certmonger are logged but do not abort client installation.
    """
    if not options.request_cert:
        return

    if not ca_enabled:
        root_logger.warning(
            "An RA is not configured on the server. "
            "Not requesting host certificate.")
        return

    principal = 'host/%s@%s' % (hostname, cli_realm)

    if options.hostname:
        # If the hostname is explicitly set then we need to tell certmonger
        # which principal name to use when requesting certs.
        certmonger.add_principal_to_cas(principal)

    cmonger = services.knownservices.certmonger
    try:
        cmonger.enable()
    except Exception as e:
        root_logger.error(
            "Failed to configure automatic startup of the %s daemon: %s",
            cmonger.service_name, str(e))
        root_logger.warning(
            "Automatic certificate management will not be available")

    # Request our host cert
    subject = str(DN(('CN', hostname), subject_base))
    passwd_fname = os.path.join(paths.IPA_NSSDB_DIR, 'pwdfile.txt')
    try:
        certmonger.request_cert(nssdb=paths.IPA_NSSDB_DIR,
                                nickname='Local IPA host',
                                subject=subject,
                                principal=principal,
                                passwd_fname=passwd_fname)
    except Exception:
        root_logger.error("%s request for host certificate failed",
                          cmonger.service_name)
def configure_sssd_conf(fstore, cli_realm, cli_domain, cli_server, options, client_domain, client_hostname):
    """Create or update /etc/sssd/sssd.conf for the IPA domain.

    Tries to import the existing config; on failure either bails out
    (--preserve-sssd set and a config exists) or starts a fresh one.
    Configures the ipa id/auth/chpass/access providers, optional ssh and
    sudo services, DNS updates and TLS.  Returns 0 on success, 1 when an
    unparseable config must be preserved.
    """
    try:
        sssdconfig = SSSDConfig.SSSDConfig()
        sssdconfig.import_config()
    except Exception as e:
        if os.path.exists(paths.SSSD_CONF) and options.preserve_sssd:
            # SSSD config is in place but we are unable to read it
            # In addition, we are instructed to preserve it
            # This all means we can't use it and have to bail out
            root_logger.error(
                "SSSD config exists but cannot be parsed: %s", str(e))
            root_logger.error(
                "Was instructed to preserve existing SSSD config")
            root_logger.info("Correct errors in /etc/sssd/sssd.conf and " +
                             "re-run installation")
            return 1

        # SSSD configuration does not exist or we are not asked to preserve
        # it, create new one.
        # We do make new SSSDConfig instance because IPAChangeConf-derived
        # classes have no means to reset their state and ParseError exception
        # could come due to parsing error from older version which cannot be
        # upgraded anymore, leaving sssdconfig instance practically unusable.
        # Note that we already backed up sssd.conf before going into this
        # routine.
        if isinstance(e, IOError):
            pass
        else:
            # It was not IOError so it must have been parsing error
            root_logger.error("Unable to parse existing SSSD config. " +
                              "As option --preserve-sssd was not specified, new config " +
                              "will override the old one.")
            root_logger.info("The old /etc/sssd/sssd.conf is backed up and " +
                             "will be restored during uninstall.")
        root_logger.info("New SSSD config will be created")
        sssdconfig = SSSDConfig.SSSDConfig()
        sssdconfig.new_config()

    try:
        domain = sssdconfig.new_domain(cli_domain)
    except SSSDConfig.DomainAlreadyExistsError:
        root_logger.info("Domain %s is already configured in existing SSSD " +
                         "config, creating a new one.", cli_domain)
        root_logger.info("The old /etc/sssd/sssd.conf is backed up and will " +
                         "be restored during uninstall.")
        sssdconfig = SSSDConfig.SSSDConfig()
        sssdconfig.new_config()
        domain = sssdconfig.new_domain(cli_domain)

    ssh_dir = services.knownservices.sshd.get_config_dir()
    ssh_config = os.path.join(ssh_dir, 'ssh_config')
    sshd_config = os.path.join(ssh_dir, 'sshd_config')

    if (options.conf_ssh and file_exists(ssh_config)) or (options.conf_sshd and file_exists(sshd_config)):
        try:
            sssdconfig.new_service('ssh')
        except SSSDConfig.ServiceAlreadyExists:
            pass
        except SSSDConfig.ServiceNotRecognizedError:
            root_logger.error("Unable to activate the SSH service in SSSD config.")
            root_logger.info(
                "Please make sure you have SSSD built with SSH support installed.")
            root_logger.info(
                "Configure SSH support manually in /etc/sssd/sssd.conf.")

        sssdconfig.activate_service('ssh')

    if options.conf_sudo:
        # Activate the service in the SSSD config
        try:
            sssdconfig.new_service('sudo')
        except SSSDConfig.ServiceAlreadyExists:
            pass
        except SSSDConfig.ServiceNotRecognizedError:
            root_logger.error("Unable to activate the SUDO service in "
                              "SSSD config.")

        sssdconfig.activate_service('sudo')
        configure_nsswitch_database(fstore, 'sudoers', ['sss'],
                                    default_value=['files'])

    domain.add_provider('ipa', 'id')

    # Add discovery domain if client domain differs from server domain;
    # do not set this config in server mode (#3947).
    if not options.on_master and cli_domain != client_domain:
        domain.set_option('dns_discovery_domain', cli_domain)

    if not options.on_master:
        if options.primary:
            domain.set_option('ipa_server', ', '.join(cli_server))
        else:
            domain.set_option('ipa_server', '_srv_, %s' % ', '.join(cli_server))
    else:
        domain.set_option('ipa_server_mode', 'True')
        # the master should only use itself for Kerberos
        domain.set_option('ipa_server', cli_server[0])

        # increase memcache timeout to 10 minutes when in server mode
        try:
            nss_service = sssdconfig.get_service('nss')
        except SSSDConfig.NoServiceError:
            nss_service = sssdconfig.new_service('nss')

        nss_service.set_option('memcache_timeout', 600)
        sssdconfig.save_service(nss_service)

    domain.set_option('ipa_domain', cli_domain)
    domain.set_option('ipa_hostname', client_hostname)
    if cli_domain.lower() != cli_realm.lower():
        domain.set_option('krb5_realm', cli_realm)

    # Might need this if /bin/hostname doesn't return a FQDN
    # domain.set_option('ipa_hostname', 'client.example.com')

    domain.add_provider('ipa', 'auth')
    domain.add_provider('ipa', 'chpass')
    if not options.permit:
        domain.add_provider('ipa', 'access')
    else:
        domain.add_provider('permit', 'access')

    domain.set_option('cache_credentials', True)

    # SSSD will need TLS for checking if ipaMigrationEnabled attribute is
    # set.  Note that SSSD will force StartTLS because the channel is later
    # used for authentication as well if password migration is enabled.
    # Thus set the option unconditionally.
    domain.set_option('ldap_tls_cacert', CACERT)

    if options.dns_updates:
        domain.set_option('dyndns_update', True)
        if options.all_ip_addresses:
            domain.set_option('dyndns_iface', '*')
        else:
            iface = get_server_connection_interface(cli_server[0])
            domain.set_option('dyndns_iface', iface)
    if options.krb5_offline_passwords:
        domain.set_option('krb5_store_password_if_offline', True)

    domain.set_active(True)

    sssdconfig.save_domain(domain)
    sssdconfig.write(paths.SSSD_CONF)

    return 0
def change_ssh_config(filename, changes, sections):
    """Apply *changes* to an OpenSSH-style config file in place.

    Before the first line whose keyword appears in *sections* (e.g. 'Host',
    'Match'), every existing line whose keyword is a key of *changes* is
    commented out; the new 'key value' lines are then inserted (keys mapped
    to None are only commented out, not re-added).  The remainder of the
    file is preserved unchanged.  Keyword matching is case-insensitive.

    Returns True on success (or when *changes* is empty), False when the
    file cannot be opened for reading or writing.
    """
    if not changes:
        return True

    try:
        f = open(filename, 'r')
    except IOError as e:
        root_logger.error("Failed to open '%s': %s", filename, str(e))
        return False

    change_keys = tuple(key.lower() for key in changes)
    section_keys = tuple(key.lower() for key in sections)

    lines = []
    # 'with' guarantees the handle is closed even if processing raises
    # (the original leaked it on such errors).
    with f:
        for line in f:
            line = line.rstrip('\n')
            pline = line.strip()
            # Keep blank lines and comments verbatim.
            if not pline or pline.startswith('#'):
                lines.append(line)
                continue
            option = pline.split()[0].lower()
            if option in section_keys:
                # Start of a section: stop rewriting options here.
                lines.append(line)
                break
            if option in change_keys:
                line = '#' + line
            lines.append(line)
        # Insert the new option values.
        for option, value in changes.items():
            if value is not None:
                lines.append('%s %s' % (option, value))
        # Copy the rest of the file untouched.
        for line in f:
            line = line.rstrip('\n')
            lines.append(line)
        lines.append('')

    try:
        f = open(filename, 'w')
    except IOError as e:
        root_logger.error("Failed to open '%s': %s", filename, str(e))
        return False

    with f:
        f.write('\n'.join(lines))

    return True
2012-09-12 09:19:26 -04:00
|
|
|
def configure_ssh_config(fstore, options):
    """Adjust the system-wide ssh_config for IPA.

    Backs the file up through *fstore*, then enables public-key
    authentication and, depending on *options*, SSSD known-hosts
    proxying and SSHFP-based host key verification.  Does nothing
    when ssh_config is absent.
    """
    config_dir = services.knownservices.sshd.get_config_dir()
    ssh_config = os.path.join(config_dir, 'ssh_config')

    if not file_exists(ssh_config):
        root_logger.info("%s not found, skipping configuration" % ssh_config)
        return

    fstore.backup_file(ssh_config)

    # Options to set (existing occurrences are commented out by
    # change_ssh_config before the replacements are appended).
    new_options = {}
    new_options['PubkeyAuthentication'] = 'yes'

    if options.sssd and file_exists(paths.SSS_SSH_KNOWNHOSTSPROXY):
        # Route host-key lookups through the SSSD known-hosts proxy.
        new_options['ProxyCommand'] = '%s -p %%p %%h' % paths.SSS_SSH_KNOWNHOSTSPROXY
        new_options['GlobalKnownHostsFile'] = paths.SSSD_PUBCONF_KNOWN_HOSTS

    if options.trust_sshfp:
        # Trust SSHFP DNS records for host key verification.
        new_options['VerifyHostKeyDNS'] = 'yes'
        new_options['HostKeyAlgorithms'] = 'ssh-rsa,ssh-dss'

    change_ssh_config(ssh_config, new_options, ['Host'])
    root_logger.info('Configured %s', ssh_config)
|
2011-12-07 03:49:09 -05:00
|
|
|
|
2012-09-12 09:19:26 -04:00
|
|
|
def configure_sshd_config(fstore, options):
    """Adjust sshd_config for IPA and restart sshd if it is running.

    Enables public key / GSSAPI / PAM authentication, disables plain
    Kerberos authentication, and — when SSSD integration is requested —
    probes which AuthorizedKeysCommand-style directive the installed
    OpenSSH server supports so that user keys stored in IPA can be used.
    Does nothing when sshd_config is absent.
    """
    sshd = services.knownservices.sshd
    ssh_dir = sshd.get_config_dir()
    sshd_config = os.path.join(ssh_dir, 'sshd_config')

    if not file_exists(sshd_config):
        root_logger.info("%s not found, skipping configuration" % sshd_config)
        return

    # Preserve the original file so ipa-client-install --uninstall can restore it.
    fstore.backup_file(sshd_config)

    changes = {
        'PubkeyAuthentication': 'yes',
        'KerberosAuthentication': 'no',
        'GSSAPIAuthentication': 'yes',
        'UsePAM': 'yes',
    }

    if options.sssd and file_exists(paths.SSS_SSH_AUTHORIZEDKEYS):
        authorized_keys_changes = None

        # Candidate directive sets, newest OpenSSH syntax first.  Older
        # servers used AuthorizedKeysCommandRunAs or the pre-standard
        # PubKeyAgent/PubKeyAgentRunAs pair.
        candidates = (
            {
                'AuthorizedKeysCommand': paths.SSS_SSH_AUTHORIZEDKEYS,
                'AuthorizedKeysCommandUser': 'nobody',
            },
            {
                'AuthorizedKeysCommand': paths.SSS_SSH_AUTHORIZEDKEYS,
                'AuthorizedKeysCommandRunAs': 'nobody',
            },
            {
                'PubKeyAgent': '%s %%u' % paths.SSS_SSH_AUTHORIZEDKEYS,
                'PubKeyAgentRunAs': 'nobody',
            },
        )

        # Probe each candidate with 'sshd -t' (config test mode) against an
        # empty config file; the first set sshd accepts is used.
        for candidate in candidates:
            args = ['sshd', '-t', '-f', paths.DEV_NULL]
            for item in candidate.items():
                args.append('-o')
                args.append('%s=%s' % item)

            (stdout, stderr, retcode) = ipautil.run(args, raiseonerr=False)
            if retcode == 0:
                authorized_keys_changes = candidate
                break

        if authorized_keys_changes is not None:
            changes.update(authorized_keys_changes)
        else:
            root_logger.warning("Installed OpenSSH server does not "
                "support dynamically loading authorized user keys. "
                "Public key authentication of IPA users will not be "
                "available.")

    change_ssh_config(sshd_config, changes, ['Match'])
    root_logger.info('Configured %s', sshd_config)

    if sshd.is_running():
        try:
            sshd.restart()
        except Exception as e:
            # Non-fatal: log and continue with the installation.
            log_service_error(sshd.service_name, 'restart', e)
|
|
|
|
|
|
2013-08-30 16:05:01 +02:00
|
|
|
|
|
|
|
|
def configure_automount(options):
    """Run ipa-client-automount to set up automount for this client.

    Failures are logged but do not abort the caller; on success the
    tool's stdout is echoed to the log.
    """
    root_logger.info('\nConfiguring automount:')

    cmd = ['ipa-client-automount', '--debug', '-U',
           '--location', options.location]
    if options.server:
        # Pin the automount setup to the first discovered/requested server.
        cmd += ['--server', options.server[0]]
    if not options.sssd:
        cmd += ['--no-sssd']

    try:
        stdout, _, _ = run(cmd)
    except Exception as e:
        root_logger.error('Automount configuration failed: %s', str(e))
    else:
        root_logger.info(stdout)
|
|
|
|
|
|
|
|
|
|
|
2013-09-25 13:45:45 +02:00
|
|
|
def configure_nisdomain(options, domain):
    """Set the NIS domain name for this host and enable the service.

    Uses options.nisdomain when given, otherwise *domain*.  The previous
    NIS domain name and the enabled-state of the domainname service are
    saved to the statestore so uninstall can restore them.
    """
    domain = options.nisdomain or domain
    root_logger.info('Configuring %s as NIS domain.' % domain)

    # First backup the old NIS domain name
    previous_name = ''
    if os.path.exists(paths.BIN_NISDOMAINNAME):
        try:
            previous_name, _, _ = ipautil.run([paths.BIN_NISDOMAINNAME])
        except CalledProcessError:
            # Best effort: keep the empty default if the tool fails.
            pass

    statestore.backup_state('network', 'nisdomain', previous_name)

    # Backup the state of the domainname service
    domainname_svc = services.knownservices.domainname
    statestore.backup_state("domainname", "enabled",
                            domainname_svc.is_enabled())

    # Set the new NIS domain name
    tasks.set_nisdomain(domain)

    # Enable and start the domainname service.  Restart rather than start
    # so the new NIS domain name is loaded even if it is already running.
    domainname_svc.enable()
    domainname_svc.restart()
|
2013-09-25 13:45:45 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def unconfigure_nisdomain():
    """Restore the pre-install NIS domain configuration.

    Reads the values saved by configure_nisdomain() from the statestore:
    re-applies the old NIS domain name (or clears it) and disables the
    domainname service if it was not enabled before installation.
    """
    # Set the nisdomain permanent and current nisdomain configuration as it was
    if statestore.has_state('network'):
        old_nisdomain = statestore.restore_state('network', 'nisdomain') or ''

        if old_nisdomain:
            root_logger.info('Restoring %s as NIS domain.' % old_nisdomain)
        else:
            root_logger.info('Unconfiguring the NIS domain.')

        tasks.set_nisdomain(old_nisdomain)

    # Restore the configuration of the domainname service
    enabled = statestore.restore_state('domainname', 'enabled')
    if not enabled:
        services.knownservices.domainname.disable()
|
2013-09-25 13:45:45 +02:00
|
|
|
|
|
|
|
|
|
2015-08-18 19:45:23 +02:00
|
|
|
def get_iface_from_ip(ip_addr):
    """Return the name of the network interface that carries *ip_addr*.

    Parses 'ip -oneline address show' output; raises RuntimeError when
    no interface has the address assigned.
    """
    ipresult = ipautil.run([paths.IP, '-oneline', 'address', 'show'])
    for entry in ipresult[0].split('\n'):
        parts = entry.split()
        # A valid address line has at least 6 whitespace-separated fields;
        # field 2 is the family, field 3 is "addr/prefix", field 1 the iface.
        if len(parts) < 6 or parts[2] not in ['inet', 'inet6']:
            continue
        addr = parts[3].rsplit('/', 1)[0]
        if addr == ip_addr:
            return parts[1]
    raise RuntimeError("IP %s not assigned to any interface." % ip_addr)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_local_ipaddresses(iface=None):
    """Return the machine's IP addresses as CheckedIPAddress objects.

    When *iface* is given, only addresses on that interface are listed.
    Addresses rejected by CheckedIPAddress (e.g. loopback) are skipped.
    """
    cmd = [paths.IP, '-oneline', 'address', 'show']
    if iface:
        cmd += ['dev', iface]

    output = ipautil.run(cmd)[0]

    addresses = []
    for entry in output.split('\n'):
        parts = entry.split()
        # Skip anything that is not an inet/inet6 address line.
        if len(parts) < 6 or parts[2] not in ['inet', 'inet6']:
            continue
        addr = parts[3].rsplit('/', 1)[0]
        try:
            addresses.append(ipautil.CheckedIPAddress(addr))
        except ValueError:
            # Not a usable address (e.g. loopback/link-local) — ignore.
            continue
    return addresses
|
2012-07-03 16:49:10 +02:00
|
|
|
|
2011-02-17 08:30:36 -05:00
|
|
|
|
2011-12-07 03:36:27 -05:00
|
|
|
def do_nsupdate(update_txt):
    """Run nsupdate with GSS-TSIG on the given command text.

    Writes *update_txt* to UPDATE_FILE, runs 'nsupdate -g' on it, and
    removes the file afterwards (best effort).

    :param update_txt: complete nsupdate command script
    :return: True if nsupdate succeeded, False otherwise
    """
    root_logger.debug("Writing nsupdate commands to %s:", UPDATE_FILE)
    root_logger.debug("%s", update_txt)

    # BUGFIX: the original file()/write()/close() sequence leaked the file
    # descriptor if write() raised; a context manager always closes it.
    # (file() is also a Python-2-only builtin; open() is equivalent.)
    with open(UPDATE_FILE, "w") as update_fd:
        update_fd.write(update_txt)
        update_fd.flush()

    result = False
    try:
        ipautil.run([paths.NSUPDATE, '-g', UPDATE_FILE])
        result = True
    except CalledProcessError as e:
        root_logger.debug('nsupdate failed: %s', str(e))

    # Best-effort cleanup of the scratch file.
    try:
        os.remove(UPDATE_FILE)
    except Exception:
        pass

    return result
|
|
|
|
|
|
2015-08-18 19:45:23 +02:00
|
|
|
# nsupdate command templates.  Each one is a complete transaction: the
# "show" line echoes the request for debugging and "send" submits it.
# $HOSTNAME, $TTL and $IPADDRESS are substituted via ipautil.template_str().
DELETE_TEMPLATE_A = """
update delete $HOSTNAME. IN A
show
send
"""

DELETE_TEMPLATE_AAAA = """
update delete $HOSTNAME. IN AAAA
show
send
"""

ADD_TEMPLATE_A = """
update add $HOSTNAME. $TTL IN A $IPADDRESS
show
send
"""

ADD_TEMPLATE_AAAA = """
update add $HOSTNAME. $TTL IN AAAA $IPADDRESS
show
send
"""

# Scratch files used while performing DNS updates.
UPDATE_FILE = paths.IPA_DNS_UPDATE_TXT
CCACHE_FILE = paths.IPA_DNS_CCACHE
|
2011-02-17 08:30:36 -05:00
|
|
|
|
2015-08-18 19:45:23 +02:00
|
|
|
def update_dns(server, hostname, options):
    """Update the host's A/AAAA records on the IPA DNS via nsupdate.

    The set of addresses to publish comes from --all-ip-addresses,
    explicit --ip-address options, or (by default) the interface used
    to reach *server*.  Existing A/AAAA records are deleted first, then
    the selected addresses are added and the result is verified.
    """
    try:
        ips = get_local_ipaddresses()
    except CalledProcessError as e:
        root_logger.error("Cannot update DNS records. %s" % e)
        root_logger.debug("Unable to get local IP addresses.")
        # BUGFIX: without this return, 'ips' is unbound below and the
        # --all-ip-addresses path raised NameError instead of failing cleanly.
        return

    if options.all_ip_addresses:
        update_ips = ips
    elif options.ip_addresses:
        update_ips = []
        for ip in options.ip_addresses:
            update_ips.append(ipautil.CheckedIPAddress(ip))
    else:
        # Default: publish only the addresses of the interface that is
        # actually used to talk to the IPA server.
        try:
            iface = get_server_connection_interface(server)
        except RuntimeError as e:
            root_logger.error("Cannot update DNS records. %s" % e)
            return
        try:
            update_ips = get_local_ipaddresses(iface)
        except CalledProcessError as e:
            root_logger.error("Cannot update DNS records. %s" % e)
            return

    if not update_ips:
        root_logger.info("Failed to determine this machine's ip address(es).")
        return

    # Delete all existing A/AAAA records first, then add the current set.
    update_txt = "debug\n"
    update_txt += ipautil.template_str(DELETE_TEMPLATE_A,
                                       dict(HOSTNAME=hostname))
    update_txt += ipautil.template_str(DELETE_TEMPLATE_AAAA,
                                       dict(HOSTNAME=hostname))

    for ip in update_ips:
        sub_dict = dict(HOSTNAME=hostname, IPADDRESS=ip, TTL=1200)
        if ip.version == 4:
            template = ADD_TEMPLATE_A
        elif ip.version == 6:
            template = ADD_TEMPLATE_AAAA
        update_txt += ipautil.template_str(template, sub_dict)

    if not do_nsupdate(update_txt):
        root_logger.error("Failed to update DNS records.")
    verify_dns_update(hostname, update_ips)
|
|
|
|
|
|
2011-03-23 14:49:48 +01:00
|
|
|
|
2015-08-18 19:45:23 +02:00
|
|
|
def verify_dns_update(fqdn, ips):
    """
    Verify that the fqdn resolves to all IP addresses and
    that there's matching PTR record for every IP address.

    Only warns on mismatches; never raises for resolution problems.
    """
    # verify A/AAAA records: every ip must resolve from fqdn, and any
    # address that resolves but was not requested is reported as extra.
    missing_ips = [str(ip) for ip in ips]
    extra_ips = []
    for record_type in [dns.rdatatype.A, dns.rdatatype.AAAA]:
        root_logger.debug('DNS resolver: Query: %s IN %s' %
                          (fqdn, dns.rdatatype.to_text(record_type)))
        try:
            answers = dns.resolver.query(fqdn, record_type)
        except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
            root_logger.debug('DNS resolver: No record.')
        except dns.resolver.NoNameservers:
            # BUGFIX: the implicit string concatenation was missing a
            # space, logging "answered thequery."
            root_logger.debug('DNS resolver: No nameservers answered the '
                              'query.')
        except dns.exception.DNSException:
            root_logger.debug('DNS resolver error.')
        else:
            for rdata in answers:
                try:
                    missing_ips.remove(rdata.address)
                except ValueError:
                    extra_ips.append(rdata.address)

    # verify PTR records: each address must have a PTR pointing at fqdn.
    fqdn_name = dns.name.from_text(fqdn)
    wrong_reverse = {}
    missing_reverse = [str(ip) for ip in ips]
    for ip in ips:
        ip_str = str(ip)
        addr = dns.reversename.from_address(ip_str)
        root_logger.debug('DNS resolver: Query: %s IN PTR' % addr)
        try:
            answers = dns.resolver.query(addr, dns.rdatatype.PTR)
        except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
            root_logger.debug('DNS resolver: No record.')
        except dns.resolver.NoNameservers:
            # BUGFIX: same missing space as above.
            root_logger.debug('DNS resolver: No nameservers answered the '
                              'query.')
        except dns.exception.DNSException:
            root_logger.debug('DNS resolver error.')
        else:
            missing_reverse.remove(ip_str)
            for rdata in answers:
                if not rdata.target == fqdn_name:
                    wrong_reverse.setdefault(ip_str, []).append(rdata.target)

    if missing_ips:
        root_logger.warning('Missing A/AAAA record(s) for host %s: %s.' %
                            (fqdn, ', '.join(missing_ips)))
    if extra_ips:
        root_logger.warning('Extra A/AAAA record(s) for host %s: %s.' %
                            (fqdn, ', '.join(extra_ips)))
    if missing_reverse:
        root_logger.warning('Missing reverse record(s) for address(es): %s.' %
                            ', '.join(missing_reverse))
    if wrong_reverse:
        root_logger.warning('Incorrect reverse record(s):')
        for ip in wrong_reverse:
            for target in wrong_reverse[ip]:
                root_logger.warning('%s is pointing to %s instead of %s' %
                                    (ip, target, fqdn_name))
|
|
|
|
|
|
|
|
|
|
def get_server_connection_interface(server):
    """Return the name of the local interface used to reach *server*.

    Connects to the server's LDAP port (389) for each address family
    returned by getaddrinfo(), takes the local address of the first
    successful connection and maps it to an interface name.

    :raises RuntimeError: if no connection attempt yields an interface
    """
    # connect to IPA server, get all ip addresses of interface used to connect
    # BUGFIX: last_error must be pre-initialized — if getaddrinfo() returns
    # an empty list (or every candidate is skipped), the final error path
    # previously raised NameError instead of the intended RuntimeError.
    last_error = None
    for res in socket.getaddrinfo(server, 389, socket.AF_UNSPEC,
                                  socket.SOCK_STREAM):
        (af, socktype, proto, canonname, sa) = res
        try:
            s = socket.socket(af, socktype, proto)
        except socket.error as e:
            last_error = e
            s = None
            continue
        try:
            s.connect(sa)
            sockname = s.getsockname()
            ip = sockname[0]
        except socket.error as e:
            last_error = e
            continue
        finally:
            if s:
                s.close()
        try:
            return get_iface_from_ip(ip)
        except (CalledProcessError, RuntimeError) as e:
            last_error = e

    # No candidate produced an interface name.
    msg = "Cannot get server connection interface"
    if last_error:
        msg += ": %s" % (last_error)
    raise RuntimeError(msg)
|
|
|
|
|
|
2011-02-17 08:30:36 -05:00
|
|
|
|
2015-08-18 19:45:23 +02:00
|
|
|
def client_dns(server, hostname, options):
    """Ensure the client's DNS records exist, updating them if needed.

    DNS records are (re)written when the user asked for it explicitly
    (--enable-dns-updates, --all-ip-addresses, --ip-address) or when the
    hostname currently does not resolve at all.
    """
    resolvable = ipautil.is_host_resolvable(hostname)

    if not resolvable:
        root_logger.warning("Hostname (%s) does not have A/AAAA record.",
                            hostname)

    needs_update = (options.dns_updates or options.all_ip_addresses
                    or options.ip_addresses or not resolvable)
    if needs_update:
        update_dns(server, hostname, options)
|
2011-02-17 08:30:36 -05:00
|
|
|
|
2015-08-18 19:45:23 +02:00
|
|
|
|
|
|
|
|
def check_ip_addresses(options):
    """Validate every --ip-address option against this machine.

    Each address must parse and match a locally configured address
    (CheckedIPAddress with match_local=True).  Returns False on the
    first invalid address after logging it, True otherwise.
    """
    for ip in (options.ip_addresses or []):
        try:
            ipautil.CheckedIPAddress(ip, match_local=True)
        except ValueError as e:
            root_logger.error(e)
            return False
    return True
|
2011-02-17 08:30:36 -05:00
|
|
|
|
2011-12-07 03:40:51 -05:00
|
|
|
def update_ssh_keys(server, hostname, ssh_dir, create_sshfp):
    """Upload the host's SSH public keys to IPA and update SSHFP records.

    Reads every *.pub file under *ssh_dir*, stores the parsed keys in the
    host entry via host_mod, and — when *create_sshfp* is true — replaces
    the host's SSHFP DNS records via nsupdate.
    """
    if not os.path.isdir(ssh_dir):
        return

    pubkeys = []
    for basename in os.listdir(ssh_dir):
        if not basename.endswith('.pub'):
            continue
        filename = os.path.join(ssh_dir, basename)

        try:
            f = open(filename, 'r')
        except IOError as e:
            root_logger.warning("Failed to open '%s': %s", filename, str(e))
            continue

        for line in f:
            line = line[:-1].lstrip()
            if not line or line.startswith('#'):
                continue
            try:
                pubkey = SSHPublicKey(line)
            # BUGFIX: this was 'except ValueError as UnicodeDecodeError:',
            # a botched py2->py3 conversion of 'except ValueError,
            # UnicodeDecodeError:' that shadowed the builtin and never
            # caught UnicodeDecodeError.  Catch both types.
            except (ValueError, UnicodeDecodeError):
                continue
            root_logger.info("Adding SSH public key from %s", filename)
            pubkeys.append(pubkey)

        f.close()

    try:
        # Use the RPC directly so older servers are supported
        api.Backend.rpcclient.forward(
            'host_mod',
            unicode(hostname),
            ipasshpubkey=[pk.openssh() for pk in pubkeys],
            updatedns=False,
            version=u'2.26',  # this version adds support for SSH public keys
        )
    except errors.EmptyModlist:
        # Keys already up to date on the server — nothing to do.
        pass
    except StandardError as e:
        root_logger.info("host_mod: %s", str(e))
        root_logger.warning("Failed to upload host SSH public keys.")
        return

    if create_sshfp:
        ttl = 1200

        # Replace all SSHFP records: delete, then add SHA-1 and SHA-256
        # fingerprints for each uploaded key.
        update_txt = 'debug\n'
        update_txt += 'update delete %s. IN SSHFP\nshow\nsend\n' % hostname
        for pubkey in pubkeys:
            sshfp = pubkey.fingerprint_dns_sha1()
            if sshfp is not None:
                update_txt += 'update add %s. %s IN SSHFP %s\n' % (hostname, ttl, sshfp)
            sshfp = pubkey.fingerprint_dns_sha256()
            if sshfp is not None:
                update_txt += 'update add %s. %s IN SSHFP %s\n' % (hostname, ttl, sshfp)
        update_txt += 'show\nsend\n'

        if not do_nsupdate(update_txt):
            root_logger.warning("Could not update DNS SSHFP records.")
|
2011-12-07 03:40:51 -05:00
|
|
|
|
2012-09-26 08:52:50 -04:00
|
|
|
def print_port_conf_info():
    """Log the firewall ports required for enrollment and operation."""
    message = (
        "Please make sure the following ports are opened "
        "in the firewall settings:\n"
        " TCP: 80, 88, 389\n"
        " UDP: 88 (at least one of TCP/UDP ports 88 has to be open)\n"
        "Also note that following ports are necessary for ipa-client "
        "working properly after enrollment:\n"
        " TCP: 464\n"
        " UDP: 464, 123 (if NTP enabled)")
    root_logger.info(message)
|
|
|
|
|
|
2014-10-13 14:30:15 +02:00
|
|
|
def get_certs_from_ldap(server, base_dn, realm, ca_enabled):
    """Fetch the CA certificates from the IPA LDAP certstore.

    Binds with SASL/GSSAPI (current Kerberos credentials).  Maps LDAP
    failures onto IPA error types: NoCertificateError when nothing is
    found, NetworkError on connection problems, LDAPError otherwise.
    """
    conn = ipaldap.IPAdmin(server, sasl_nocanon=True)
    try:
        conn.do_sasl_gssapi_bind()
        result = certstore.get_ca_certs(conn, base_dn, realm, ca_enabled)
    except errors.NotFound:
        raise errors.NoCertificateError(entry=server)
    except errors.NetworkError as e:
        raise errors.NetworkError(uri=conn.ldap_uri, error=str(e))
    except Exception as e:
        # Anything else from the LDAP layer becomes a generic LDAPError.
        raise errors.LDAPError(str(e))
    finally:
        conn.unbind()

    return result
|
|
|
|
|
|
2014-06-12 11:58:28 +02:00
|
|
|
def get_ca_certs_from_file(url):
    '''
    Load CA certificate(s) from a user supplied file:// URL.

    Raises errors.NoCertificateError if unable to read cert.
    Raises errors.FileError if the url is malformed, not a file scheme,
    or does not name an existing plain file.
    '''
    try:
        parsed = urlparse.urlparse(url, 'file')
    except Exception:
        raise errors.FileError(reason="unable to parse file url '%s'" % url)

    if parsed.scheme != 'file':
        raise errors.FileError(reason="url is not a file scheme '%s'" % url)

    filename = parsed.path
    if not os.path.exists(filename):
        raise errors.FileError(reason="file '%s' does not exist" % filename)
    if not os.path.isfile(filename):
        raise errors.FileError(reason="file '%s' is not a file" % filename)

    root_logger.debug("trying to retrieve CA cert from file %s", filename)
    try:
        ca_certs = x509.load_certificate_list_from_file(filename)
    except Exception:
        # Unreadable / unparseable content — report as a missing cert.
        raise errors.NoCertificateError(entry=filename)

    return ca_certs
|
2012-11-15 14:57:52 -05:00
|
|
|
|
2014-06-12 11:58:28 +02:00
|
|
|
def get_ca_certs_from_http(url, warn=True):
    '''
    Retrieve CA certificate(s) over plain HTTP with wget.

    This is insecure and should be avoided; a warning is logged
    unless *warn* is False.

    Raises errors.NoCertificateError if the download or the
    certificate parsing fails.
    '''
    if warn:
        root_logger.warning("Downloading the CA certificate via HTTP, "
                            "this is INSECURE")

    root_logger.debug("trying to retrieve CA cert via HTTP from %s", url)

    try:
        stdout, stderr, rc = run([paths.BIN_WGET, "-O", "-", url])
    except CalledProcessError:
        raise errors.NoCertificateError(entry=url)

    try:
        ca_certs = x509.load_certificate_list(stdout)
    except Exception:
        raise errors.NoCertificateError(entry=url)

    return ca_certs
|
2014-06-12 11:52:38 +02:00
|
|
|
|
2014-06-12 12:04:59 +02:00
|
|
|
def get_ca_certs_from_ldap(server, basedn, realm):
    '''
    Retrieve the CA certificates from the LDAP server, binding with
    GSSAPI using the current Kerberos credentials, and return them as
    decoded x509 certificate objects.

    Raises errors.NoCertificateError if cert is not found.
    Raises errors.NetworkError if LDAP connection can't be established.
    Raises errors.LDAPError for any other generic LDAP error.
    '''
    root_logger.debug("trying to retrieve CA cert via LDAP from %s", server)

    try:
        raw_certs = get_certs_from_ldap(server, basedn, realm, False)
    except Exception as e:
        # Log for diagnostics, then let the caller handle the failure.
        root_logger.debug("get_ca_certs_from_ldap() error: %s", e)
        raise

    # Decode DER blobs, skipping entries explicitly marked as disabled.
    decoded = [x509.load_certificate(entry[0], x509.DER)
               for entry in raw_certs if entry[2] is not False]

    return decoded
|
2012-11-15 14:57:52 -05:00
|
|
|
|
2014-06-12 11:58:28 +02:00
|
|
|
def validate_new_ca_certs(existing_ca_certs, new_ca_certs, ask,
                          override=False):
    """Compare freshly retrieved CA certs with the locally stored ones.

    When there is no local cert yet, just log the retrieval.  When the
    local set strictly contains certs that the retrieved set lacks, warn
    and either override (when *override* is set), prompt the user (when
    *ask* is set), or refuse by raising CertificateInvalidError.
    """
    if existing_ca_certs is None:
        # Nothing stored locally — accept the retrieved certs.
        root_logger.info(
            cert_summary("Successfully retrieved CA cert", new_ca_certs))
        return

    existing_ca_certs = set(existing_ca_certs)
    new_ca_certs = set(new_ca_certs)
    # Strict-superset test: mismatch only when the local file has cert(s)
    # that are missing from the retrieved set.
    if existing_ca_certs > new_ca_certs:
        root_logger.warning(
            "The CA cert available from the IPA server does not match the\n"
            "local certificate available at %s" % CACERT)
        root_logger.warning(
            cert_summary("Existing CA cert:", existing_ca_certs))
        root_logger.warning(
            cert_summary("Retrieved CA cert:", new_ca_certs))
        if override:
            root_logger.warning("Overriding existing CA cert\n")
        elif not ask or not user_input(
                "Do you want to replace the local certificate with the CA\n"
                "certificate retrieved from the IPA server?", True):
            # Unattended, or the user declined the replacement.
            raise errors.CertificateInvalidError(name='Retrieved CA')
    else:
        root_logger.debug(
            "Existing CA cert and Retrieved CA cert are identical")
|
|
|
|
|
|
2014-06-12 12:04:59 +02:00
|
|
|
def get_ca_certs(fstore, options, server, basedn, realm):
    '''
    Examine the different options and determine a method for obtaining
    the CA cert.

    If successful the CA cert will have been written into CACERT.

    Raises errors.NoCertificateError if not successful.

    The logic for determining how to load the CA cert is as follow:

    In the OTP case (not -p and -w):

    1. load from user supplied cert file
    2. else load from HTTP

    In the 'user_auth' case ((-p and -w) or interactive):

    1. load from user supplied cert file
    2. load from LDAP using SASL/GSS/Krb5 auth
       (provides mutual authentication, integrity and security)
    3. if LDAP failed and interactive ask for permission to
       use insecure HTTP (default: No)

    In the unattended case:

    1. load from user supplied cert file
    2. load from HTTP if --force specified else fail

    In all cases if HTTP is used emit warning message
    '''

    # Certs are first written to a temporary file and only renamed into
    # place after they were written (and validated) successfully.
    ca_file = CACERT + ".new"

    def ldap_url():
        # URL of the discovered IPA server's LDAP service
        return urlparse.urlunparse(('ldap', ipautil.format_netloc(server),
                                    '', '', '', ''))

    def file_url():
        # file:// URL for the user-supplied --ca-cert-file path
        return urlparse.urlunparse(('file', '', options.ca_cert_file,
                                    '', '', ''))

    def http_url():
        # Insecure HTTP fallback location of the CA cert on the server
        return urlparse.urlunparse(('http', ipautil.format_netloc(server),
                                    '/ipa/config/ca.crt', '', '', ''))

    interactive = not options.unattended
    # OTP: a password was given but no principal
    otp_auth = options.principal is None and options.password is not None
    existing_ca_certs = None
    ca_certs = None

    if options.ca_cert_file:
        # 1. The user explicitly supplied a cert file; it must load cleanly
        url = file_url()
        try:
            ca_certs = get_ca_certs_from_file(url)
        except errors.FileError as e:
            root_logger.debug(e)
            raise
        except Exception as e:
            root_logger.debug(e)
            raise errors.NoCertificateError(entry=url)
        root_logger.debug("CA cert provided by user, use it!")
    else:
        # Load any preexisting CA cert so it can be compared against the
        # newly retrieved one.
        if os.path.exists(CACERT):
            if os.path.isfile(CACERT):
                try:
                    existing_ca_certs = x509.load_certificate_list_from_file(
                        CACERT)
                except Exception as e:
                    raise errors.FileError(reason=u"Unable to load existing" +
                                           " CA cert '%s': %s" % (CACERT, e))
            else:
                # BUGFIX: '%' previously bound only to the second literal
                # (which has no placeholder), raising TypeError instead of
                # the intended FileError.
                raise errors.FileError(
                    reason=u"Existing ca cert '%s' is not a plain file"
                           % (CACERT,))

        if otp_auth:
            # OTP case: no Kerberos credentials available, so LDAP with
            # SASL/GSSAPI is not an option.
            if existing_ca_certs:
                root_logger.info("OTP case, CA cert preexisted, use it")
            else:
                url = http_url()
                override = not interactive
                if interactive and not user_input(
                        "Do you want to download the CA cert from " + url +
                        " ?\n"
                        "(this is INSECURE)", False):
                    raise errors.NoCertificateError(
                        message=u"HTTP certificate"
                        " download declined by user")
                try:
                    ca_certs = get_ca_certs_from_http(url, override)
                except Exception as e:
                    root_logger.debug(e)
                    raise errors.NoCertificateError(entry=url)

                validate_new_ca_certs(existing_ca_certs, ca_certs, False,
                                      override)
        else:
            # Auth with user credentials: prefer LDAP (mutual auth,
            # integrity, confidentiality); fall back to HTTP only with
            # explicit consent (--force or interactive yes).
            try:
                url = ldap_url()
                ca_certs = get_ca_certs_from_ldap(server, basedn, realm)
                validate_new_ca_certs(existing_ca_certs, ca_certs,
                                      interactive)
            except errors.FileError as e:
                root_logger.debug(e)
                raise
            except (errors.NoCertificateError, errors.LDAPError) as e:
                root_logger.debug(str(e))
                url = http_url()
                if existing_ca_certs:
                    root_logger.warning(
                        "Unable to download CA cert from LDAP\n"
                        "but found preexisting cert, using it.\n")
                elif interactive and not user_input(
                        "Unable to download CA cert from LDAP.\n"
                        "Do you want to download the CA cert from " + url +
                        "?\n"
                        "(this is INSECURE)", False):
                    raise errors.NoCertificateError(
                        message=u"HTTP "
                        "certificate download declined by user")
                elif not interactive and not options.force:
                    root_logger.error(
                        "In unattended mode without a One Time Password "
                        "(OTP) or without --ca-cert-file\nYou must specify"
                        " --force to retrieve the CA cert using HTTP")
                    raise errors.NoCertificateError(
                        message=u"HTTP "
                        "certificate download requires --force")
                else:
                    try:
                        ca_certs = get_ca_certs_from_http(url)
                    except Exception as e:
                        root_logger.debug(e)
                        raise errors.NoCertificateError(entry=url)
                    validate_new_ca_certs(existing_ca_certs, ca_certs,
                                          interactive)
            except Exception as e:
                root_logger.debug(str(e))
                raise errors.NoCertificateError(entry=url)

    if ca_certs is None and existing_ca_certs is None:
        raise errors.InternalError(u"expected CA cert file '%s' to "
                                   u"exist, but it's absent" % (ca_file))

    if ca_certs is not None:
        try:
            ca_certs = [cert.der_data for cert in ca_certs]
            x509.write_certificate_list(ca_certs, ca_file)
        except Exception as e:
            # Best-effort cleanup of the partial temp file; BUGFIX: use a
            # distinct name for the unlink error so the FileError below
            # reports the original write failure, not the cleanup failure.
            if os.path.exists(ca_file):
                try:
                    os.unlink(ca_file)
                except OSError as unlink_err:
                    root_logger.error(
                        "Failed to remove '%s': %s", ca_file, unlink_err)
            raise errors.FileError(
                reason=u"cannot write certificate file '%s': %s"
                       % (ca_file, e))

        # Atomically replace the old cert file with the validated new one
        os.rename(ca_file, CACERT)

    # Make sure the file permissions are correct
    try:
        os.chmod(CACERT, 0o644)
    except Exception as e:
        raise errors.FileError(reason=u"Unable set permissions on ca "
                               u"cert '%s': %s" % (CACERT, e))
|
|
|
|
|
2013-11-04 11:52:02 +01:00
|
|
|
# IMPORTANT: the first line of a Firefox config (autoconfig/prefs) file is
# ignored by Firefox, which is why the template below deliberately starts
# with a blank line after the opening quotes.
# $DOMAIN is substituted with the IPA domain (see configure_firefox()).
FIREFOX_CONFIG_TEMPLATE = """

/* Kerberos SSO configuration */
pref("network.negotiate-auth.trusted-uris", ".$DOMAIN");

/* These are the defaults */
pref("network.negotiate-auth.gsslib", "");
pref("network.negotiate-auth.using-native-gsslib", true);
pref("network.negotiate-auth.allow-proxies", true);
"""

# Name of the preferences file written into the Firefox preferences dir
FIREFOX_PREFERENCES_FILENAME = "all-ipa.js"
# Preferences directory path relative to the Firefox installation root
FIREFOX_PREFERENCES_REL_PATH = "browser/defaults/preferences"
|
2013-11-04 11:52:02 +01:00
|
|
|
|
|
|
|
|
def configure_firefox(options, statestore, domain):
    """Configure Firefox for Kerberos SSO against the IPA domain.

    Locates the Firefox preferences directory (either from the user-supplied
    --firefox-dir option or from the known install locations), renders
    FIREFOX_CONFIG_TEMPLATE with the IPA domain, and writes it as
    FIREFOX_PREFERENCES_FILENAME, recording the file in the statestore so it
    can be removed on uninstall.

    All failures are logged but never raised: Firefox configuration is a
    best-effort step of client installation.
    """
    try:
        root_logger.debug("Setting up Firefox configuration.")

        preferences_dir = None

        # Check user specified location of firefox install directory
        if options.firefox_dir is not None:
            pref_path = os.path.join(options.firefox_dir,
                                     FIREFOX_PREFERENCES_REL_PATH)
            if dir_exists(pref_path):
                preferences_dir = pref_path
            else:
                root_logger.error("Directory '%s' does not exist.", pref_path)
        else:
            # test if firefox is installed
            if file_exists(paths.FIREFOX):

                # find valid preferences path
                for path in [paths.LIB_FIREFOX, paths.LIB64_FIREFOX]:
                    pref_path = os.path.join(path,
                                             FIREFOX_PREFERENCES_REL_PATH)
                    if dir_exists(pref_path):
                        preferences_dir = pref_path
                        break
            else:
                root_logger.error(
                    "Firefox configuration skipped (Firefox not found).")
                return

        # setting up firefox
        if preferences_dir is not None:

            # user could specify relative path, we need to store absolute
            preferences_dir = os.path.abspath(preferences_dir)
            root_logger.debug("Firefox preferences directory found '%s'.",
                              preferences_dir)
            preferences_fname = os.path.join(preferences_dir,
                                             FIREFOX_PREFERENCES_FILENAME)
            update_txt = ipautil.template_str(FIREFOX_CONFIG_TEMPLATE,
                                              dict(DOMAIN=domain))
            root_logger.debug("Firefox trusted and delegation uris will be "
                              "set as '.%s' domain.", domain)
            root_logger.debug("Firefox configuration will be stored in '%s' "
                              "file.", preferences_fname)

            try:
                with open(preferences_fname, 'w') as f:
                    f.write(update_txt)
                root_logger.info("Firefox successfully configured.")
                # Record the file so ipa-client-install --uninstall can
                # remove it again.
                statestore.backup_state('firefox', 'preferences_fname',
                                        preferences_fname)
            except Exception as e:
                root_logger.debug("An error occurred during creating "
                                  "preferences file: %s.", str(e))
                root_logger.error("Firefox configuration failed.")
        else:
            root_logger.debug("Firefox preferences directory not found.")
            root_logger.error("Firefox configuration failed.")

    except Exception as e:
        # Catch-all: Firefox configuration must never abort installation
        root_logger.debug(str(e))
        root_logger.error("Firefox configuration failed.")
|
|
|
|
2012-11-15 14:57:52 -05:00
|
|
|
|
2011-08-29 17:44:02 -04:00
|
|
|
def install(options, env, fstore, statestore):
|
2010-09-17 21:23:08 -04:00
|
|
|
dnsok = False
|
2010-05-06 16:41:59 -04:00
|
|
|
|
2008-04-09 15:55:46 -04:00
|
|
|
cli_domain = None
|
|
|
|
|
cli_server = None
|
2010-11-01 13:51:14 -04:00
|
|
|
subject_base = None
|
2010-04-05 16:27:46 -04:00
|
|
|
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_domain_source = 'Unknown source'
|
|
|
|
|
cli_server_source = 'Unknown source'
|
|
|
|
|
|
2012-12-07 16:44:32 +01:00
|
|
|
if options.conf_ntp and not options.on_master and not options.force_ntpd:
|
|
|
|
|
try:
|
|
|
|
|
ipaclient.ntpconf.check_timedate_services()
|
2015-07-30 16:49:29 +02:00
|
|
|
except ipaclient.ntpconf.NTPConflictingService as e:
|
2012-12-07 16:44:32 +01:00
|
|
|
print "WARNING: ntpd time&date synchronization service will not" \
|
|
|
|
|
" be configured as"
|
|
|
|
|
print "conflicting service (%s) is enabled" % e.conflicting_service
|
|
|
|
|
print "Use --force-ntpd option to disable it and force configuration" \
|
|
|
|
|
" of ntpd"
|
|
|
|
|
print ""
|
|
|
|
|
|
|
|
|
|
# configuration of ntpd is disabled in this case
|
|
|
|
|
options.conf_ntp = False
|
|
|
|
|
except ipaclient.ntpconf.NTPConfigurationError:
|
|
|
|
|
pass
|
|
|
|
|
|
2013-02-26 13:20:13 +01:00
|
|
|
if options.unattended and (options.password is None and
|
|
|
|
|
options.principal is None and
|
|
|
|
|
options.keytab is None and
|
|
|
|
|
options.prompt_password is False and
|
|
|
|
|
not options.on_master):
|
|
|
|
|
root_logger.error("One of password / principal / keytab is required.")
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2009-11-19 14:14:42 -05:00
|
|
|
|
2011-03-17 10:22:33 -04:00
|
|
|
if options.hostname:
|
|
|
|
|
hostname = options.hostname
|
2012-06-13 11:44:06 -04:00
|
|
|
hostname_source = 'Provided as option'
|
2011-03-17 10:22:33 -04:00
|
|
|
else:
|
|
|
|
|
hostname = socket.getfqdn()
|
2012-06-13 11:44:06 -04:00
|
|
|
hostname_source = "Machine's FQDN"
|
2011-03-17 10:22:33 -04:00
|
|
|
if hostname != hostname.lower():
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error(
|
|
|
|
|
"Invalid hostname '%s', must be lower-case.", hostname)
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2012-01-20 13:44:48 +01:00
|
|
|
if (hostname == 'localhost') or (hostname == 'localhost.localdomain'):
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Invalid hostname, '%s' must not be used.", hostname)
|
2012-01-20 13:44:48 +01:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2011-03-17 10:22:33 -04:00
|
|
|
|
2011-12-05 10:19:10 +01:00
|
|
|
# when installing with '--no-sssd' option, check whether nss-ldap is installed
|
|
|
|
|
if not options.sssd:
|
|
|
|
|
(nssldap_installed, nosssd_files) = nssldap_exists()
|
|
|
|
|
if not nssldap_installed:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("One of these packages must be installed: " +
|
|
|
|
|
"nss_ldap or nss-pam-ldapd")
|
2011-12-05 10:19:10 +01:00
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
2013-06-05 15:52:47 +02:00
|
|
|
if options.keytab and options.principal:
|
|
|
|
|
root_logger.error("Options 'principal' and 'keytab' cannot be used "
|
|
|
|
|
"together.")
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
|
|
|
|
if options.keytab and options.force_join:
|
|
|
|
|
root_logger.warning("Option 'force-join' has no additional effect "
|
|
|
|
|
"when used with together with option 'keytab'.")
|
|
|
|
|
|
2013-10-15 11:31:49 +02:00
|
|
|
# Check if old certificate exist and show warning
|
|
|
|
|
if not options.ca_cert_file and get_cert_path(options.ca_cert_file) == CACERT:
|
|
|
|
|
root_logger.warning("Using existing certificate '%s'.", CACERT)
|
|
|
|
|
|
2015-08-18 19:45:23 +02:00
|
|
|
if not check_ip_addresses(options):
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
2007-08-16 18:00:16 -04:00
|
|
|
# Create the discovery instance
|
2011-07-06 10:30:24 -04:00
|
|
|
ds = ipadiscovery.IPADiscovery()
|
2007-08-16 18:00:16 -04:00
|
|
|
|
2014-08-27 12:31:09 +02:00
|
|
|
ret = ds.search(domain=options.domain, servers=options.server, realm=options.realm_name, hostname=hostname, ca_cert_path=get_cert_path(options.ca_cert_file))
|
2011-05-18 17:06:15 +02:00
|
|
|
|
2013-03-13 14:44:20 +01:00
|
|
|
if options.server and ret != 0:
|
|
|
|
|
# There is no point to continue with installation as server list was
|
|
|
|
|
# passed as a fixed list of server and thus we cannot discover any
|
|
|
|
|
# better result
|
|
|
|
|
root_logger.error("Failed to verify that %s is an IPA Server.",
|
|
|
|
|
', '.join(options.server))
|
|
|
|
|
root_logger.error("This may mean that the remote server is not up "
|
|
|
|
|
"or is not reachable due to network or firewall settings.")
|
|
|
|
|
print_port_conf_info()
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
2011-07-06 10:30:24 -04:00
|
|
|
if ret == ipadiscovery.BAD_HOST_CONFIG:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Can't get the fully qualified name of this host")
|
|
|
|
|
root_logger.info("Check that the client is properly configured")
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2011-07-06 10:30:24 -04:00
|
|
|
if ret == ipadiscovery.NOT_FQDN:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("%s is not a fully-qualified hostname", hostname)
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2011-10-12 10:55:08 +02:00
|
|
|
if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
|
2012-06-13 11:44:06 -04:00
|
|
|
or not ds.domain:
|
|
|
|
|
if ret == ipadiscovery.NO_LDAP_SERVER:
|
|
|
|
|
if ds.server:
|
|
|
|
|
root_logger.debug("%s is not an LDAP server" % ds.server)
|
|
|
|
|
else:
|
|
|
|
|
root_logger.debug("No LDAP server found")
|
|
|
|
|
elif ret == ipadiscovery.NOT_IPA_SERVER:
|
|
|
|
|
if ds.server:
|
|
|
|
|
root_logger.debug("%s is not an IPA server" % ds.server)
|
|
|
|
|
else:
|
|
|
|
|
root_logger.debug("No IPA server found")
|
|
|
|
|
else:
|
|
|
|
|
root_logger.debug("Domain not found")
|
2007-08-16 18:00:16 -04:00
|
|
|
if options.domain:
|
2008-04-09 15:55:46 -04:00
|
|
|
cli_domain = options.domain
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_domain_source = 'Provided as option'
|
2007-08-16 18:00:16 -04:00
|
|
|
elif options.unattended:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error(
|
|
|
|
|
"Unable to discover domain, not provided on command line")
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2007-08-16 18:00:16 -04:00
|
|
|
else:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info(
|
|
|
|
|
"DNS discovery failed to determine your DNS domain")
|
2011-07-29 13:05:07 +03:00
|
|
|
cli_domain = user_input("Provide the domain name of your IPA server (ex: example.com)", allow_empty = False)
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_domain_source = 'Provided interactively'
|
|
|
|
|
root_logger.debug(
|
|
|
|
|
"will use interactively provided domain: %s", cli_domain)
|
2013-02-04 09:35:13 -05:00
|
|
|
ret = ds.search(domain=cli_domain, servers=options.server, hostname=hostname, ca_cert_path=get_cert_path(options.ca_cert_file))
|
2011-05-18 17:06:15 +02:00
|
|
|
|
2008-04-09 15:55:46 -04:00
|
|
|
if not cli_domain:
|
2012-06-13 11:44:06 -04:00
|
|
|
if ds.domain:
|
|
|
|
|
cli_domain = ds.domain
|
|
|
|
|
cli_domain_source = ds.domain_source
|
|
|
|
|
root_logger.debug("will use discovered domain: %s", cli_domain)
|
2008-04-09 15:55:46 -04:00
|
|
|
|
2011-10-21 11:18:26 +02:00
|
|
|
client_domain = hostname[hostname.find(".")+1:]
|
|
|
|
|
|
2011-10-12 10:55:08 +02:00
|
|
|
if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
|
2012-06-13 11:44:06 -04:00
|
|
|
or not ds.server:
|
2011-11-15 14:39:31 -05:00
|
|
|
root_logger.debug("IPA Server not found")
|
2007-08-30 19:40:54 -04:00
|
|
|
if options.server:
|
2008-04-09 15:55:46 -04:00
|
|
|
cli_server = options.server
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_server_source = 'Provided as option'
|
2007-08-30 19:40:54 -04:00
|
|
|
elif options.unattended:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Unable to find IPA Server to join")
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2007-08-30 19:40:54 -04:00
|
|
|
else:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.debug("DNS discovery failed to find the IPA Server")
|
2012-07-03 17:37:22 -04:00
|
|
|
cli_server = [user_input("Provide your IPA server name (ex: ipa.example.com)", allow_empty = False)]
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_server_source = 'Provided interactively'
|
2012-07-03 17:37:22 -04:00
|
|
|
root_logger.debug("will use interactively provided server: %s", cli_server[0])
|
2013-02-04 09:35:13 -05:00
|
|
|
ret = ds.search(domain=cli_domain, servers=cli_server, hostname=hostname, ca_cert_path=get_cert_path(options.ca_cert_file))
|
2012-07-03 17:37:22 -04:00
|
|
|
|
2010-09-17 21:23:08 -04:00
|
|
|
else:
|
2012-07-03 17:37:22 -04:00
|
|
|
# Only set dnsok to True if we were not passed in one or more servers
|
|
|
|
|
# and if DNS discovery actually worked.
|
|
|
|
|
if not options.server:
|
|
|
|
|
(server, domain) = ds.check_domain(ds.domain, set(), "Validating DNS Discovery")
|
|
|
|
|
if server and domain:
|
|
|
|
|
root_logger.debug("DNS validated, enabling discovery")
|
|
|
|
|
dnsok = True
|
|
|
|
|
else:
|
|
|
|
|
root_logger.debug("DNS discovery failed, disabling discovery")
|
|
|
|
|
else:
|
|
|
|
|
root_logger.debug("Using servers from command line, disabling DNS discovery")
|
|
|
|
|
|
2008-04-09 15:55:46 -04:00
|
|
|
if not cli_server:
|
2012-07-03 17:37:22 -04:00
|
|
|
if options.server:
|
2013-02-04 09:35:13 -05:00
|
|
|
cli_server = ds.servers
|
2012-07-03 17:37:22 -04:00
|
|
|
cli_server_source = 'Provided as option'
|
|
|
|
|
root_logger.debug("will use provided server: %s", ', '.join(options.server))
|
|
|
|
|
elif ds.server:
|
2013-02-04 09:35:13 -05:00
|
|
|
cli_server = ds.servers
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_server_source = ds.server_source
|
2012-07-03 17:37:22 -04:00
|
|
|
root_logger.debug("will use discovered server: %s", cli_server[0])
|
2008-04-09 15:55:46 -04:00
|
|
|
|
2011-07-06 10:30:24 -04:00
|
|
|
if ret == ipadiscovery.NOT_IPA_SERVER:
|
2012-07-03 17:37:22 -04:00
|
|
|
root_logger.error("%s is not an IPA v2 Server.", cli_server[0])
|
2012-09-26 08:52:50 -04:00
|
|
|
print_port_conf_info()
|
2012-07-03 17:37:22 -04:00
|
|
|
root_logger.debug("(%s: %s)", cli_server[0], cli_server_source)
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2011-09-28 16:31:38 -04:00
|
|
|
|
|
|
|
|
if ret == ipadiscovery.NO_ACCESS_TO_LDAP:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.warning("Anonymous access to the LDAP server is disabled.")
|
|
|
|
|
root_logger.info("Proceeding without strict verification.")
|
|
|
|
|
root_logger.info("Note: This is not an error if anonymous access " +
|
|
|
|
|
"has been explicitly restricted.")
|
2011-09-28 16:31:38 -04:00
|
|
|
ret = 0
|
|
|
|
|
|
2012-11-15 14:57:52 -05:00
|
|
|
if ret == ipadiscovery.NO_TLS_LDAP:
|
|
|
|
|
root_logger.warning("The LDAP server requires TLS is but we do not " +
|
|
|
|
|
"have the CA.")
|
|
|
|
|
root_logger.info("Proceeding without strict verification.")
|
|
|
|
|
ret = 0
|
|
|
|
|
|
2007-08-30 19:40:54 -04:00
|
|
|
if ret != 0:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Failed to verify that %s is an IPA Server.",
|
2012-07-03 17:37:22 -04:00
|
|
|
cli_server[0])
|
2012-09-26 08:52:50 -04:00
|
|
|
root_logger.error("This may mean that the remote server is not up "
|
2012-06-08 09:36:38 -04:00
|
|
|
"or is not reachable due to network or firewall settings.")
|
2012-09-26 08:52:50 -04:00
|
|
|
print_port_conf_info()
|
2012-07-03 17:37:22 -04:00
|
|
|
root_logger.debug("(%s: %s)", cli_server[0], cli_server_source)
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2007-08-30 19:40:54 -04:00
|
|
|
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_kdc = ds.kdc
|
2011-03-21 14:50:05 +01:00
|
|
|
if dnsok and not cli_kdc:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("DNS domain '%s' is not configured for automatic " +
|
2012-06-13 11:44:06 -04:00
|
|
|
"KDC address lookup.", ds.realm.lower())
|
|
|
|
|
root_logger.debug("(%s: %s)", ds.realm, ds.realm_source)
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("KDC address will be set to fixed value.")
|
2011-03-21 14:50:05 +01:00
|
|
|
|
2007-08-30 19:40:54 -04:00
|
|
|
if dnsok:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Discovery was successful!")
|
2007-08-30 19:40:54 -04:00
|
|
|
elif not options.unattended:
|
2012-07-03 17:37:22 -04:00
|
|
|
if not options.server:
|
|
|
|
|
root_logger.warning("The failure to use DNS to find your IPA" +
|
|
|
|
|
" server indicates that your resolv.conf file is not properly" +
|
|
|
|
|
" configured.")
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Autodiscovery of servers for failover cannot work " +
|
|
|
|
|
"with this configuration.")
|
|
|
|
|
root_logger.info("If you proceed with the installation, services " +
|
|
|
|
|
"will be configured to always access the discovered server for " +
|
|
|
|
|
"all operations and will not fail over to other servers in case " +
|
|
|
|
|
"of failure.")
|
2011-02-09 13:51:35 -05:00
|
|
|
if not user_input("Proceed with fixed values and no DNS discovery?", False):
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2007-08-30 19:40:54 -04:00
|
|
|
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_realm = ds.realm
|
|
|
|
|
cli_realm_source = ds.realm_source
|
|
|
|
|
root_logger.debug("will use discovered realm: %s", cli_realm)
|
|
|
|
|
|
|
|
|
|
if options.realm_name and options.realm_name != cli_realm:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error(
|
|
|
|
|
"The provided realm name [%s] does not match discovered one [%s]",
|
2012-06-13 11:44:06 -04:00
|
|
|
options.realm_name, cli_realm)
|
|
|
|
|
root_logger.debug("(%s: %s)", cli_realm, cli_realm_source)
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2008-02-19 15:57:53 -05:00
|
|
|
|
2012-06-13 11:44:06 -04:00
|
|
|
cli_basedn = ds.basedn
|
|
|
|
|
cli_basedn_source = ds.basedn_source
|
|
|
|
|
root_logger.debug("will use discovered basedn: %s", cli_basedn)
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equalty comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
  because string formatting converts its arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 07:36:35 -04:00
|
|
|
subject_base = DN(('O', cli_realm))
|
2008-04-09 15:55:46 -04:00
|
|
|
|
2015-04-13 17:02:24 +02:00
|
|
|
root_logger.info("Client hostname: %s", hostname)
|
2012-06-13 11:44:06 -04:00
|
|
|
root_logger.debug("Hostname source: %s", hostname_source)
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Realm: %s", cli_realm)
|
2012-06-13 11:44:06 -04:00
|
|
|
root_logger.debug("Realm source: %s", cli_realm_source)
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("DNS Domain: %s", cli_domain)
|
2012-06-13 11:44:06 -04:00
|
|
|
root_logger.debug("DNS Domain source: %s", cli_domain_source)
|
2012-07-03 17:37:22 -04:00
|
|
|
root_logger.info("IPA Server: %s", ', '.join(cli_server))
|
2012-06-13 11:44:06 -04:00
|
|
|
root_logger.debug("IPA Server source: %s", cli_server_source)
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("BaseDN: %s", cli_basedn)
|
2012-06-13 11:44:06 -04:00
|
|
|
root_logger.debug("BaseDN source: %s", cli_basedn_source)
|
2007-08-30 19:40:54 -04:00
|
|
|
|
2015-08-11 10:10:48 +02:00
|
|
|
# ipa-join would fail with IP address instead of a FQDN
|
|
|
|
|
for srv in cli_server:
|
|
|
|
|
try:
|
|
|
|
|
socket.inet_pton(socket.AF_INET, srv)
|
|
|
|
|
is_ipaddr = True
|
|
|
|
|
except:
|
|
|
|
|
try:
|
|
|
|
|
socket.inet_pton(socket.AF_INET6, srv)
|
|
|
|
|
is_ipaddr = True
|
|
|
|
|
except:
|
|
|
|
|
is_ipaddr = False
|
|
|
|
|
|
|
|
|
|
if is_ipaddr:
|
|
|
|
|
print
|
|
|
|
|
root_logger.warning("It seems that you are using an IP address "
|
|
|
|
|
"instead of FQDN as an argument to --server. The "
|
|
|
|
|
"installation may fail.")
|
|
|
|
|
break
|
|
|
|
|
|
2012-06-08 09:36:38 -04:00
|
|
|
print
|
2008-07-21 12:25:37 +02:00
|
|
|
if not options.unattended and not user_input("Continue to configure the system with these values?", False):
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
2012-06-06 10:44:06 -04:00
|
|
|
if not options.on_master:
|
|
|
|
|
# Try removing old principals from the keytab
|
|
|
|
|
try:
|
2014-06-17 11:45:43 +02:00
|
|
|
ipautil.run([paths.IPA_RMKEYTAB,
|
|
|
|
|
'-k', paths.KRB5_KEYTAB, '-r', cli_realm])
|
2015-07-30 16:49:29 +02:00
|
|
|
except CalledProcessError as e:
|
2012-06-06 10:44:06 -04:00
|
|
|
if e.returncode not in (3, 5):
|
|
|
|
|
# 3 - Unable to open keytab
|
|
|
|
|
# 5 - Principal name or realm not found in keytab
|
|
|
|
|
root_logger.error("Error trying to clean keytab: " +
|
|
|
|
|
"/usr/sbin/ipa-rmkeytab returned %s" % e.returncode)
|
|
|
|
|
else:
|
|
|
|
|
root_logger.info("Removed old keys for realm %s from %s" % (
|
2014-06-17 11:45:43 +02:00
|
|
|
cli_realm, paths.KRB5_KEYTAB))
|
2012-06-06 10:44:06 -04:00
|
|
|
|
2011-10-13 12:16:15 +02:00
|
|
|
if options.hostname and not options.on_master:
|
2011-08-29 10:22:20 +02:00
|
|
|
# configure /etc/sysconfig/network to contain the hostname we set.
|
2011-10-13 12:16:15 +02:00
|
|
|
# skip this step when run by ipa-server-install as it always configures
|
|
|
|
|
# hostname if different from system hostname
|
2014-05-29 10:18:21 +02:00
|
|
|
tasks.backup_and_replace_hostname(fstore, statestore, options.hostname)
|
2013-11-21 13:09:28 +01:00
|
|
|
|
2015-04-15 14:32:17 +02:00
|
|
|
ntp_srv_servers = []
|
2015-02-25 14:22:02 -08:00
|
|
|
if not options.on_master and options.conf_ntp:
|
2014-02-18 19:55:56 -07:00
|
|
|
# Attempt to sync time with IPA server.
|
2015-02-25 14:22:02 -08:00
|
|
|
# If we're skipping NTP configuration, we also skip the time sync here.
|
2014-02-18 19:55:56 -07:00
|
|
|
# We assume that NTP servers are discoverable through SRV records in the DNS
|
|
|
|
|
# If that fails, we try to sync directly with IPA server, assuming it runs NTP
|
|
|
|
|
root_logger.info('Synchronizing time with KDC...')
|
2015-04-14 18:56:47 +02:00
|
|
|
ntp_srv_servers = ds.ipadns_search_srv(cli_domain, '_ntp._udp',
|
|
|
|
|
None, break_on_first=False)
|
2014-02-18 19:55:56 -07:00
|
|
|
synced_ntp = False
|
2015-04-15 15:06:45 +02:00
|
|
|
ntp_servers = ntp_srv_servers
|
|
|
|
|
|
|
|
|
|
# use user specified NTP servers if there are any
|
|
|
|
|
if options.ntp_servers:
|
|
|
|
|
ntp_servers = options.ntp_servers
|
|
|
|
|
|
|
|
|
|
for s in ntp_servers:
|
2015-03-30 12:29:04 +02:00
|
|
|
synced_ntp = ipaclient.ntpconf.synconce_ntp(s, options.debug)
|
2015-04-15 15:06:45 +02:00
|
|
|
if synced_ntp:
|
|
|
|
|
break
|
|
|
|
|
|
|
|
|
|
if not synced_ntp and not options.ntp_servers:
|
2015-03-30 12:29:04 +02:00
|
|
|
synced_ntp = ipaclient.ntpconf.synconce_ntp(cli_server[0],
|
|
|
|
|
options.debug)
|
2014-02-18 19:55:56 -07:00
|
|
|
if not synced_ntp:
|
2015-04-15 15:06:45 +02:00
|
|
|
root_logger.warning("Unable to sync time with NTP " +
|
2014-02-18 19:55:56 -07:00
|
|
|
"server, assuming the time is in sync. Please check " +
|
|
|
|
|
"that 123 UDP port is opened.")
|
2015-04-07 08:54:30 -06:00
|
|
|
else:
|
2015-04-15 15:06:45 +02:00
|
|
|
root_logger.info('Skipping synchronizing time with NTP server.')
|
2011-08-29 10:22:20 +02:00
|
|
|
|
2009-11-19 14:14:42 -05:00
|
|
|
if not options.unattended:
|
2013-02-26 13:20:13 +01:00
|
|
|
if (options.principal is None and options.password is None and
|
|
|
|
|
options.prompt_password is False and options.keytab is None):
|
|
|
|
|
options.principal = user_input("User authorized to enroll "
|
|
|
|
|
"computers", allow_empty=False)
|
2012-06-13 11:44:06 -04:00
|
|
|
root_logger.debug(
|
|
|
|
|
"will use principal provided as option: %s", options.principal)
|
2007-11-16 20:18:36 -05:00
|
|
|
|
2015-03-16 16:30:55 +01:00
|
|
|
host_principal = 'host/%s@%s' % (hostname, cli_realm)
|
2009-11-19 14:14:42 -05:00
|
|
|
if not options.on_master:
|
2011-09-22 11:52:58 -04:00
|
|
|
nolog = tuple()
|
2009-11-19 14:14:42 -05:00
|
|
|
# First test out the kerberos configuration
|
|
|
|
|
try:
|
|
|
|
|
(krb_fd, krb_name) = tempfile.mkstemp()
|
|
|
|
|
os.close(krb_fd)
|
2012-09-06 03:52:20 -04:00
|
|
|
if configure_krb5_conf(
|
|
|
|
|
cli_realm=cli_realm,
|
|
|
|
|
cli_domain=cli_domain,
|
|
|
|
|
cli_server=cli_server,
|
|
|
|
|
cli_kdc=cli_kdc,
|
|
|
|
|
dnsok=False,
|
|
|
|
|
options=options,
|
|
|
|
|
filename=krb_name,
|
|
|
|
|
client_domain=client_domain):
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Test kerberos configuration failed")
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2010-09-10 15:57:40 -04:00
|
|
|
env['KRB5_CONFIG'] = krb_name
|
2013-03-14 14:33:56 +01:00
|
|
|
(ccache_fd, ccache_name) = tempfile.mkstemp()
|
|
|
|
|
os.close(ccache_fd)
|
2014-06-17 11:45:43 +02:00
|
|
|
join_args = [paths.SBIN_IPA_JOIN,
|
2013-11-26 12:15:33 +01:00
|
|
|
"-s", cli_server[0],
|
|
|
|
|
"-b", str(realm_to_suffix(cli_realm)),
|
|
|
|
|
"-h", hostname]
|
2009-12-04 16:39:14 -05:00
|
|
|
if options.debug:
|
|
|
|
|
join_args.append("-d")
|
2011-10-20 11:29:26 -04:00
|
|
|
env['XMLRPC_TRACE_CURL'] = 'yes'
|
2013-03-18 11:06:22 +01:00
|
|
|
if options.force_join:
|
|
|
|
|
join_args.append("-f")
|
2009-11-19 14:14:42 -05:00
|
|
|
if options.principal is not None:
|
2010-05-05 14:52:39 -04:00
|
|
|
stdin = None
|
2009-11-19 14:14:42 -05:00
|
|
|
principal = options.principal
|
|
|
|
|
if principal.find('@') == -1:
|
|
|
|
|
principal = '%s@%s' % (principal, cli_realm)
|
2010-05-05 14:52:39 -04:00
|
|
|
if options.password is not None:
|
|
|
|
|
stdin = options.password
|
|
|
|
|
else:
|
|
|
|
|
if not options.unattended:
|
2011-10-06 08:22:08 +02:00
|
|
|
try:
|
|
|
|
|
stdin = getpass.getpass("Password for %s: " % principal)
|
|
|
|
|
except EOFError:
|
|
|
|
|
stdin = None
|
2011-02-14 12:00:49 -08:00
|
|
|
if not stdin:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error(
|
|
|
|
|
"Password must be provided for %s.", principal)
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2010-05-05 14:52:39 -04:00
|
|
|
else:
|
|
|
|
|
if sys.stdin.isatty():
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Password must be provided in " +
|
|
|
|
|
"non-interactive mode.")
|
|
|
|
|
root_logger.info("This can be done via " +
|
|
|
|
|
"echo password | ipa-client-install ... " +
|
|
|
|
|
"or with the -w option.")
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2010-05-05 14:52:39 -04:00
|
|
|
else:
|
|
|
|
|
stdin = sys.stdin.readline()
|
|
|
|
|
|
2015-03-16 16:30:55 +01:00
|
|
|
try:
|
2015-05-20 14:23:30 +00:00
|
|
|
ipautil.kinit_password(principal, stdin, ccache_name,
|
|
|
|
|
config=krb_name)
|
2015-03-16 16:30:55 +01:00
|
|
|
except RuntimeError as e:
|
2013-04-22 17:09:47 +02:00
|
|
|
print_port_conf_info()
|
2015-03-16 16:30:55 +01:00
|
|
|
root_logger.error("Kerberos authentication failed: %s" % e)
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2013-02-26 13:20:13 +01:00
|
|
|
elif options.keytab:
|
|
|
|
|
join_args.append("-f")
|
|
|
|
|
if os.path.exists(options.keytab):
|
2015-03-16 16:30:55 +01:00
|
|
|
try:
|
|
|
|
|
ipautil.kinit_keytab(host_principal, options.keytab,
|
|
|
|
|
ccache_name,
|
2015-05-20 14:23:30 +00:00
|
|
|
config=krb_name,
|
2015-03-16 16:30:55 +01:00
|
|
|
attempts=options.kinit_attempts)
|
2015-07-20 16:04:07 +02:00
|
|
|
except gssapi.exceptions.GSSError as e:
|
2013-04-22 17:09:47 +02:00
|
|
|
print_port_conf_info()
|
2015-03-16 16:30:55 +01:00
|
|
|
root_logger.error("Kerberos authentication failed: %s"
|
|
|
|
|
% e)
|
2013-02-26 13:20:13 +01:00
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
else:
|
|
|
|
|
root_logger.error("Keytab file could not be found: %s"
|
|
|
|
|
% options.keytab)
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
2009-11-19 14:14:42 -05:00
|
|
|
elif options.password:
|
2011-09-22 11:52:58 -04:00
|
|
|
nolog = (options.password,)
|
2009-11-19 14:14:42 -05:00
|
|
|
join_args.append("-w")
|
|
|
|
|
join_args.append(options.password)
|
|
|
|
|
elif options.prompt_password:
|
2010-05-05 14:52:39 -04:00
|
|
|
if options.unattended:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error(
|
|
|
|
|
"Password must be provided in non-interactive mode")
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2011-10-06 08:22:08 +02:00
|
|
|
try:
|
|
|
|
|
password = getpass.getpass("Password: ")
|
|
|
|
|
except EOFError:
|
|
|
|
|
password = None
|
|
|
|
|
if not password:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Password must be provided.")
|
2011-10-06 08:22:08 +02:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2009-11-19 14:14:42 -05:00
|
|
|
join_args.append("-w")
|
|
|
|
|
join_args.append(password)
|
2011-09-22 11:52:58 -04:00
|
|
|
nolog = (password,)
|
2009-11-19 14:14:42 -05:00
|
|
|
|
2015-03-16 16:30:55 +01:00
|
|
|
env['KRB5CCNAME'] = os.environ['KRB5CCNAME'] = ccache_name
|
2012-11-15 14:57:52 -05:00
|
|
|
# Get the CA certificate
|
|
|
|
|
try:
|
|
|
|
|
os.environ['KRB5_CONFIG'] = env['KRB5_CONFIG']
|
2014-06-12 12:04:59 +02:00
|
|
|
get_ca_certs(fstore, options, cli_server[0], cli_basedn,
|
|
|
|
|
cli_realm)
|
2012-11-15 14:57:52 -05:00
|
|
|
del os.environ['KRB5_CONFIG']
|
2015-07-30 16:49:29 +02:00
|
|
|
except errors.FileError as e:
|
2013-04-19 14:32:20 +02:00
|
|
|
root_logger.error(e)
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
2015-07-30 16:49:29 +02:00
|
|
|
except Exception as e:
|
2012-11-15 14:57:52 -05:00
|
|
|
root_logger.error("Cannot obtain CA certificate\n%s", e)
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
2009-11-19 14:14:42 -05:00
|
|
|
# Now join the domain
|
2011-09-22 11:52:58 -04:00
|
|
|
(stdout, stderr, returncode) = run(join_args, raiseonerr=False, env=env, nolog=nolog)
|
2009-11-19 14:14:42 -05:00
|
|
|
|
|
|
|
|
if returncode != 0:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Joining realm failed: %s", stderr)
|
2009-11-19 14:14:42 -05:00
|
|
|
if not options.force:
|
2013-04-22 12:02:45 +02:00
|
|
|
if returncode == 13:
|
|
|
|
|
root_logger.info("Use --force-join option to override "
|
|
|
|
|
"the host entry on the server "
|
|
|
|
|
"and force client enrollment.")
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Use ipa-getkeytab to obtain a host " +
|
|
|
|
|
"principal for this server.")
|
2010-09-17 17:20:23 -04:00
|
|
|
else:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Enrolled in IPA realm %s", cli_realm)
|
2010-04-05 16:27:46 -04:00
|
|
|
|
|
|
|
|
start = stderr.find('Certificate subject base is: ')
|
|
|
|
|
if start >= 0:
|
|
|
|
|
start = start + 29
|
|
|
|
|
subject_base = stderr[start:]
|
|
|
|
|
subject_base = subject_base.strip()
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
  dn's. This is implemented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
  attributes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
  - DN equality comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
    for deletion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
  - Simplify the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
  optimizations. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schemas from individual
servers. This was done because of the observation we talk to
  different LDAP servers, each of which may have its own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now sensitive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
  central table look-up based on its LDAP syntax. The table key is
  the LDAP syntax, its value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
  (e.g. DistinguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
  because string formatting converts its arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 07:36:35 -04:00
|
|
|
subject_base = DN(subject_base)
|
2010-04-05 16:27:46 -04:00
|
|
|
|
2009-11-19 14:14:42 -05:00
|
|
|
if options.principal is not None:
|
2012-09-06 03:52:20 -04:00
|
|
|
stderr, stdout, returncode = run(
|
|
|
|
|
["kdestroy"], raiseonerr=False, env=env)
|
|
|
|
|
|
|
|
|
|
# Obtain the TGT. We do it with the temporary krb5.conf, so that
|
|
|
|
|
# only the KDC we're installing under is contacted.
|
|
|
|
|
# Other KDCs might not have replicated the principal yet.
|
|
|
|
|
# Once we have the TGT, it's usable on any server.
|
|
|
|
|
try:
|
2015-03-16 16:30:55 +01:00
|
|
|
ipautil.kinit_keytab(host_principal, paths.KRB5_KEYTAB,
|
|
|
|
|
CCACHE_FILE,
|
2015-05-20 14:23:30 +00:00
|
|
|
config=krb_name,
|
2015-03-16 16:30:55 +01:00
|
|
|
attempts=options.kinit_attempts)
|
|
|
|
|
env['KRB5CCNAME'] = os.environ['KRB5CCNAME'] = CCACHE_FILE
|
2015-07-20 16:04:07 +02:00
|
|
|
except gssapi.exceptions.GSSError as e:
|
2015-03-16 16:30:55 +01:00
|
|
|
print_port_conf_info()
|
|
|
|
|
root_logger.error("Failed to obtain host TGT: %s" % e)
|
2012-09-06 03:52:20 -04:00
|
|
|
# failure to get ticket makes it impossible to login and bind
|
|
|
|
|
# from sssd to LDAP, abort installation and rollback changes
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
|
|
|
|
finally:
|
2012-06-08 09:36:38 -04:00
|
|
|
try:
|
|
|
|
|
os.remove(krb_name)
|
|
|
|
|
except OSError:
|
|
|
|
|
root_logger.error("Could not remove %s", krb_name)
|
2013-03-14 14:33:56 +01:00
|
|
|
try:
|
|
|
|
|
os.remove(ccache_name)
|
|
|
|
|
except OSError:
|
|
|
|
|
pass
|
2012-06-08 09:36:38 -04:00
|
|
|
try:
|
|
|
|
|
os.remove(krb_name + ".ipabkp")
|
|
|
|
|
except OSError:
|
|
|
|
|
root_logger.error("Could not remove %s.ipabkp", krb_name)
|
2008-05-23 14:51:50 -04:00
|
|
|
|
2009-11-19 14:14:42 -05:00
|
|
|
# Configure ipa.conf
|
|
|
|
|
if not options.on_master:
|
2014-08-27 16:02:35 +02:00
|
|
|
configure_ipa_conf(fstore, cli_basedn, cli_realm, cli_domain, cli_server, hostname)
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Created /etc/ipa/default.conf")
|
2007-11-16 20:18:36 -05:00
|
|
|
|
2011-12-07 03:15:45 -05:00
|
|
|
api.bootstrap(context='cli_installer', debug=options.debug)
|
|
|
|
|
api.finalize()
|
|
|
|
|
if 'config_loaded' not in api.env:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Failed to initialize IPA API.")
|
2011-12-07 03:15:45 -05:00
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
2011-04-21 15:55:17 -04:00
|
|
|
# Always back up sssd.conf. It gets updated by authconfig --enablekrb5.
|
2014-06-17 11:45:43 +02:00
|
|
|
fstore.backup_file(paths.SSSD_CONF)
|
2010-02-03 15:41:02 -05:00
|
|
|
if options.sssd:
|
2012-04-12 14:19:15 +02:00
|
|
|
if configure_sssd_conf(fstore, cli_realm, cli_domain, cli_server, options, client_domain, hostname):
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Configured /etc/sssd/sssd.conf")
|
2010-02-03 15:41:02 -05:00
|
|
|
|
2012-09-12 07:31:15 -04:00
|
|
|
if options.on_master:
|
|
|
|
|
# If on master assume kerberos is already configured properly.
|
|
|
|
|
# Get the host TGT.
|
|
|
|
|
try:
|
2015-03-16 16:30:55 +01:00
|
|
|
ipautil.kinit_keytab(host_principal, paths.KRB5_KEYTAB,
|
|
|
|
|
CCACHE_FILE,
|
|
|
|
|
attempts=options.kinit_attempts)
|
|
|
|
|
os.environ['KRB5CCNAME'] = CCACHE_FILE
|
2015-07-20 16:04:07 +02:00
|
|
|
except gssapi.exceptions.GSSError as e:
|
2015-03-16 16:30:55 +01:00
|
|
|
root_logger.error("Failed to obtain host TGT: %s" % e)
|
2012-09-12 07:31:15 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
else:
|
2009-11-19 14:14:42 -05:00
|
|
|
# Configure krb5.conf
|
2014-06-17 11:45:43 +02:00
|
|
|
fstore.backup_file(paths.KRB5_CONF)
|
2012-09-06 03:52:20 -04:00
|
|
|
if configure_krb5_conf(
|
|
|
|
|
cli_realm=cli_realm,
|
|
|
|
|
cli_domain=cli_domain,
|
|
|
|
|
cli_server=cli_server,
|
|
|
|
|
cli_kdc=cli_kdc,
|
|
|
|
|
dnsok=dnsok,
|
|
|
|
|
options=options,
|
2014-06-17 11:45:43 +02:00
|
|
|
filename=paths.KRB5_CONF,
|
2012-09-06 03:52:20 -04:00
|
|
|
client_domain=client_domain):
|
2011-08-29 17:44:02 -04:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2009-11-19 14:14:42 -05:00
|
|
|
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info(
|
|
|
|
|
"Configured /etc/krb5.conf for IPA realm %s", cli_realm)
|
2007-08-30 19:40:54 -04:00
|
|
|
|
2012-10-01 13:05:11 -04:00
|
|
|
# Clear out any current session keyring information
|
|
|
|
|
try:
|
2012-12-04 18:20:17 -05:00
|
|
|
delete_persistent_client_session_data(host_principal)
|
2012-10-01 13:05:11 -04:00
|
|
|
except ValueError:
|
|
|
|
|
pass
|
|
|
|
|
|
2015-03-17 09:29:21 +00:00
|
|
|
ca_certs = x509.load_certificate_list_from_file(CACERT)
|
|
|
|
|
ca_certs = [cert.der_data for cert in ca_certs]
|
|
|
|
|
|
2014-09-18 12:00:15 +02:00
|
|
|
with certdb.NSSDatabase() as tmp_db:
|
2014-09-18 16:28:59 +02:00
|
|
|
# Add CA certs to a temporary NSS database
|
|
|
|
|
try:
|
|
|
|
|
pwd_file = ipautil.write_tmp_file(ipautil.ipa_generate_password())
|
2014-09-18 12:00:15 +02:00
|
|
|
tmp_db.create_db(pwd_file.name)
|
2014-06-12 13:40:56 +02:00
|
|
|
|
2014-09-18 16:28:59 +02:00
|
|
|
for i, cert in enumerate(ca_certs):
|
2014-09-18 12:00:15 +02:00
|
|
|
tmp_db.add_cert(cert, 'CA certificate %d' % (i + 1), 'C,,')
|
2015-07-30 16:49:29 +02:00
|
|
|
except CalledProcessError as e:
|
2014-09-18 16:28:59 +02:00
|
|
|
root_logger.info("Failed to add CA to temporary NSS database.")
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
|
|
|
|
# Now, let's try to connect to the server's RPC interface
|
|
|
|
|
connected = False
|
2012-05-02 15:36:04 +02:00
|
|
|
try:
|
2014-09-18 12:00:15 +02:00
|
|
|
api.Backend.rpcclient.connect(nss_dir=tmp_db.secdir)
|
2014-09-18 16:28:59 +02:00
|
|
|
connected = True
|
|
|
|
|
root_logger.debug("Try RPC connection")
|
2012-12-19 04:25:24 -05:00
|
|
|
api.Backend.rpcclient.forward('ping')
|
2015-07-30 16:49:29 +02:00
|
|
|
except errors.KerberosError as e:
|
2014-09-18 16:28:59 +02:00
|
|
|
if connected:
|
|
|
|
|
api.Backend.rpcclient.disconnect()
|
|
|
|
|
root_logger.info(
|
|
|
|
|
"Cannot connect to the server due to Kerberos error: %s. "
|
|
|
|
|
"Trying with delegate=True", e)
|
|
|
|
|
try:
|
|
|
|
|
api.Backend.rpcclient.connect(delegate=True,
|
2014-09-18 12:00:15 +02:00
|
|
|
nss_dir=tmp_db.secdir)
|
2014-09-18 16:28:59 +02:00
|
|
|
root_logger.debug("Try RPC connection")
|
|
|
|
|
api.Backend.rpcclient.forward('ping')
|
2013-10-04 00:44:51 +02:00
|
|
|
|
2014-09-18 16:28:59 +02:00
|
|
|
root_logger.info("Connection with delegate=True successful")
|
2012-05-02 15:36:04 +02:00
|
|
|
|
2014-09-18 16:28:59 +02:00
|
|
|
# The remote server is not capable of Kerberos S4U2Proxy
|
|
|
|
|
# delegation. This features is implemented in IPA server
|
|
|
|
|
# version 2.2 and higher
|
|
|
|
|
root_logger.warning(
|
|
|
|
|
"Target IPA server has a lower version than the enrolled "
|
|
|
|
|
"client")
|
|
|
|
|
root_logger.warning(
|
|
|
|
|
"Some capabilities including the ipa command capability "
|
|
|
|
|
"may not be available")
|
2015-07-30 16:49:29 +02:00
|
|
|
except errors.PublicError as e2:
|
2014-09-18 16:28:59 +02:00
|
|
|
root_logger.warning(
|
|
|
|
|
"Second connect with delegate=True also failed: %s", e2)
|
|
|
|
|
root_logger.error(
|
|
|
|
|
"Cannot connect to the IPA server RPC interface: %s", e2)
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
2015-07-30 16:49:29 +02:00
|
|
|
except errors.PublicError as e:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error(
|
2014-09-18 16:28:59 +02:00
|
|
|
"Cannot connect to the server due to generic error: %s", e)
|
2012-05-02 15:36:04 +02:00
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
2013-10-04 10:23:16 +02:00
|
|
|
# Use the RPC directly so older servers are supported
|
2014-10-13 14:30:15 +02:00
|
|
|
try:
|
|
|
|
|
result = api.Backend.rpcclient.forward(
|
|
|
|
|
'ca_is_enabled',
|
2015-03-17 09:35:49 +00:00
|
|
|
version=u'2.107',
|
2014-10-13 14:30:15 +02:00
|
|
|
)
|
|
|
|
|
ca_enabled = result['result']
|
2015-03-17 09:35:49 +00:00
|
|
|
except (errors.CommandError, errors.NetworkError):
|
2014-10-13 14:30:15 +02:00
|
|
|
result = api.Backend.rpcclient.forward(
|
|
|
|
|
'env',
|
|
|
|
|
server=True,
|
|
|
|
|
version=u'2.0',
|
|
|
|
|
)
|
|
|
|
|
ca_enabled = result['result']['enable_ra']
|
|
|
|
|
if not ca_enabled:
|
2013-03-28 17:41:05 +01:00
|
|
|
disable_ra()
|
|
|
|
|
|
2014-09-18 16:28:59 +02:00
|
|
|
# Create IPA NSS database
|
|
|
|
|
try:
|
|
|
|
|
certdb.create_ipa_nssdb()
|
2015-07-30 16:49:29 +02:00
|
|
|
except ipautil.CalledProcessError as e:
|
2014-09-18 16:28:59 +02:00
|
|
|
root_logger.error("Failed to create IPA NSS database: %s", e)
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
2014-06-12 17:20:19 +02:00
|
|
|
# Get CA certificates from the certificate store
|
2015-03-17 09:29:21 +00:00
|
|
|
try:
|
|
|
|
|
ca_certs = get_certs_from_ldap(cli_server[0], cli_basedn, cli_realm,
|
|
|
|
|
ca_enabled)
|
|
|
|
|
except errors.NoCertificateError:
|
|
|
|
|
if ca_enabled:
|
|
|
|
|
ca_subject = DN(('CN', 'Certificate Authority'), subject_base)
|
|
|
|
|
else:
|
|
|
|
|
ca_subject = None
|
|
|
|
|
ca_certs = certstore.make_compat_ca_certs(ca_certs, cli_realm,
|
|
|
|
|
ca_subject)
|
2014-09-18 16:28:59 +02:00
|
|
|
ca_certs_trust = [(c, n, certstore.key_policy_to_trust_flags(t, True, u))
|
|
|
|
|
for (c, n, t, u) in ca_certs]
|
2014-06-12 17:20:19 +02:00
|
|
|
|
2014-09-18 16:28:59 +02:00
|
|
|
# Add the CA certificates to the IPA NSS database
|
|
|
|
|
root_logger.debug("Adding CA certificates to the IPA NSS database.")
|
2014-09-18 12:00:15 +02:00
|
|
|
ipa_db = certdb.NSSDatabase(paths.IPA_NSSDB_DIR)
|
2014-09-18 16:28:59 +02:00
|
|
|
for cert, nickname, trust_flags in ca_certs_trust:
|
|
|
|
|
try:
|
2014-09-18 12:00:15 +02:00
|
|
|
ipa_db.add_cert(cert, nickname, trust_flags)
|
2015-07-30 16:49:29 +02:00
|
|
|
except CalledProcessError as e:
|
2014-09-18 16:28:59 +02:00
|
|
|
root_logger.error(
|
|
|
|
|
"Failed to add %s to the IPA NSS database.", nickname)
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
|
|
|
|
|
# Add the CA certificates to the platform-dependant systemwide CA store
|
2014-06-12 17:20:19 +02:00
|
|
|
tasks.insert_ca_certs_into_systemwide_ca_store(ca_certs)
|
|
|
|
|
|
2014-09-18 16:28:59 +02:00
|
|
|
# Add the CA certificates to the default NSS database
|
|
|
|
|
root_logger.debug(
|
|
|
|
|
"Attempting to add CA certificates to the default NSS database.")
|
2014-09-18 12:00:15 +02:00
|
|
|
sys_db = certdb.NSSDatabase(paths.NSS_DB_DIR)
|
2014-09-18 16:28:59 +02:00
|
|
|
for cert, nickname, trust_flags in ca_certs_trust:
|
2014-06-12 13:40:56 +02:00
|
|
|
try:
|
2014-09-18 12:00:15 +02:00
|
|
|
sys_db.add_cert(cert, nickname, trust_flags)
|
2015-07-30 16:49:29 +02:00
|
|
|
except CalledProcessError as e:
|
2014-09-18 16:28:59 +02:00
|
|
|
root_logger.error(
|
|
|
|
|
"Failed to add %s to the default NSS database.", nickname)
|
2014-06-12 13:40:56 +02:00
|
|
|
return CLIENT_INSTALL_ERROR
|
2014-09-18 16:28:59 +02:00
|
|
|
root_logger.info("Added CA certificates to the default NSS database.")
|
2014-06-12 13:40:56 +02:00
|
|
|
|
2011-12-07 03:15:45 -05:00
|
|
|
if not options.on_master:
|
2015-08-18 19:45:23 +02:00
|
|
|
client_dns(cli_server[0], hostname, options)
|
2014-10-07 19:07:13 +02:00
|
|
|
configure_certmonger(fstore, subject_base, cli_realm, hostname,
|
2014-10-13 14:30:15 +02:00
|
|
|
options, ca_enabled)
|
2011-10-21 11:18:26 +02:00
|
|
|
|
2014-05-29 10:37:18 +02:00
|
|
|
update_ssh_keys(cli_server[0], hostname, services.knownservices.sshd.get_config_dir(), options.create_sshfp)
|
2011-12-07 03:40:51 -05:00
|
|
|
|
2011-12-07 03:15:45 -05:00
|
|
|
try:
|
|
|
|
|
os.remove(CCACHE_FILE)
|
2012-09-25 08:35:06 -04:00
|
|
|
except Exception:
|
2011-12-07 03:15:45 -05:00
|
|
|
pass
|
|
|
|
|
|
2011-05-10 15:14:20 +02:00
|
|
|
#Name Server Caching Daemon. Disable for SSSD, use otherwise (if installed)
|
2014-05-29 10:37:18 +02:00
|
|
|
nscd = services.knownservices.nscd
|
2011-09-13 00:11:24 +03:00
|
|
|
if nscd.is_installed():
|
2013-11-07 17:18:32 +01:00
|
|
|
save_state(nscd)
|
|
|
|
|
|
2011-05-10 15:14:20 +02:00
|
|
|
try:
|
2011-09-13 00:11:24 +03:00
|
|
|
if options.sssd:
|
|
|
|
|
nscd_service_action = 'stop'
|
|
|
|
|
nscd.stop()
|
|
|
|
|
else:
|
|
|
|
|
nscd_service_action = 'restart'
|
|
|
|
|
nscd.restart()
|
2012-09-25 08:35:06 -04:00
|
|
|
except Exception:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.warning("Failed to %s the %s daemon",
|
|
|
|
|
nscd_service_action, nscd.service_name)
|
2011-05-10 15:14:20 +02:00
|
|
|
if not options.sssd:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.warning(
|
|
|
|
|
"Caching of users/groups will not be available")
|
2011-05-18 17:06:15 +02:00
|
|
|
|
2011-05-10 15:14:20 +02:00
|
|
|
try:
|
2011-09-13 00:11:24 +03:00
|
|
|
if options.sssd:
|
|
|
|
|
nscd.disable()
|
|
|
|
|
else:
|
|
|
|
|
nscd.enable()
|
2012-09-25 08:35:06 -04:00
|
|
|
except Exception:
|
2011-05-10 15:14:20 +02:00
|
|
|
if not options.sssd:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.warning(
|
|
|
|
|
"Failed to configure automatic startup of the %s daemon",
|
|
|
|
|
nscd.service_name)
|
|
|
|
|
root_logger.info("Caching of users/groups will not be " +
|
|
|
|
|
"available after reboot")
|
2011-07-19 15:33:53 +03:00
|
|
|
else:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.warning(
|
|
|
|
|
"Failed to disable %s daemon. Disable it manually.",
|
|
|
|
|
nscd.service_name)
|
2011-07-01 11:11:38 +03:00
|
|
|
|
2011-05-10 15:14:20 +02:00
|
|
|
else:
|
|
|
|
|
# this is optional service, just log
|
2011-07-29 13:05:07 +03:00
|
|
|
if not options.sssd:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("%s daemon is not installed, skip configuration",
|
|
|
|
|
nscd.service_name)
|
2011-02-09 13:53:39 -05:00
|
|
|
|
2014-05-29 10:37:18 +02:00
|
|
|
nslcd = services.knownservices.nslcd
|
2013-11-07 17:18:32 +01:00
|
|
|
if nscd.is_installed():
|
|
|
|
|
save_state(nslcd)
|
|
|
|
|
|
2011-07-29 13:05:07 +03:00
|
|
|
retcode, conf, filename = (0, None, None)
|
2010-03-15 13:41:07 -04:00
|
|
|
|
2012-02-23 17:24:46 +01:00
|
|
|
if not options.no_ac:
|
|
|
|
|
# Modify nsswitch/pam stack
|
2014-05-27 09:13:59 +02:00
|
|
|
tasks.modify_nsswitch_pam_stack(sssd=options.sssd,
|
|
|
|
|
mkhomedir=options.mkhomedir,
|
|
|
|
|
statestore=statestore)
|
|
|
|
|
|
|
|
|
|
root_logger.info("%s enabled", "SSSD" if options.sssd else "LDAP")
|
|
|
|
|
|
2012-12-03 11:45:49 +01:00
|
|
|
if options.sssd:
|
2014-05-29 10:37:18 +02:00
|
|
|
sssd = services.service('sssd')
|
2012-12-03 11:45:49 +01:00
|
|
|
try:
|
|
|
|
|
sssd.restart()
|
|
|
|
|
except CalledProcessError:
|
|
|
|
|
root_logger.warning("SSSD service restart was unsuccessful.")
|
2011-05-18 17:06:15 +02:00
|
|
|
|
2012-12-18 13:22:40 +01:00
|
|
|
try:
|
|
|
|
|
sssd.enable()
|
2015-07-30 16:49:29 +02:00
|
|
|
except CalledProcessError as e:
|
2012-12-18 13:22:40 +01:00
|
|
|
root_logger.warning(
|
|
|
|
|
"Failed to enable automatic startup of the SSSD daemon: %s", e)
|
|
|
|
|
|
2012-02-23 17:24:46 +01:00
|
|
|
if not options.sssd:
|
2014-05-27 09:13:59 +02:00
|
|
|
tasks.modify_pam_to_use_krb5(statestore)
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Kerberos 5 enabled")
|
2012-02-23 17:24:46 +01:00
|
|
|
|
|
|
|
|
# Update non-SSSD LDAP configuration after authconfig calls as it would
|
|
|
|
|
# change its configuration otherways
|
|
|
|
|
if not options.sssd:
|
|
|
|
|
for configurer in [configure_ldap_conf, configure_nslcd_conf]:
|
2012-06-08 09:36:38 -04:00
|
|
|
(retcode, conf, filenames) = configurer(fstore, cli_basedn, cli_realm, cli_domain, cli_server, dnsok, options, nosssd_files[configurer.__name__])
|
2012-02-23 17:24:46 +01:00
|
|
|
if retcode:
|
|
|
|
|
return CLIENT_INSTALL_ERROR
|
|
|
|
|
if conf:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info(
|
|
|
|
|
"%s configured using configuration file(s) %s",
|
|
|
|
|
conf, filenames)
|
2011-05-18 17:06:15 +02:00
|
|
|
|
2013-04-22 12:55:38 +02:00
|
|
|
if configure_openldap_conf(fstore, cli_basedn, cli_server):
|
|
|
|
|
root_logger.info("Configured /etc/openldap/ldap.conf")
|
|
|
|
|
else:
|
|
|
|
|
root_logger.info("Failed to configure /etc/openldap/ldap.conf")
|
2012-01-31 22:44:20 -05:00
|
|
|
|
2012-02-23 17:24:46 +01:00
|
|
|
#Check that nss is working properly
|
|
|
|
|
if not options.on_master:
|
|
|
|
|
n = 0
|
|
|
|
|
found = False
|
|
|
|
|
# Loop for up to 10 seconds to see if nss is working properly.
|
|
|
|
|
# It can sometimes take a few seconds to connect to the remote provider.
|
|
|
|
|
# Particulary, SSSD might take longer than 6-8 seconds.
|
|
|
|
|
while n < 10 and not found:
|
|
|
|
|
try:
|
2013-09-18 13:18:37 +02:00
|
|
|
ipautil.run(["getent", "passwd", "admin@%s" % cli_domain])
|
2012-02-23 17:24:46 +01:00
|
|
|
found = True
|
2015-07-30 16:49:29 +02:00
|
|
|
except Exception as e:
|
2012-02-23 17:24:46 +01:00
|
|
|
time.sleep(1)
|
|
|
|
|
n = n + 1
|
2008-02-20 10:16:19 -05:00
|
|
|
|
2012-02-23 17:24:46 +01:00
|
|
|
if not found:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error(
|
2013-07-18 19:51:22 +02:00
|
|
|
"Unable to find 'admin' user with "
|
2013-09-18 13:18:37 +02:00
|
|
|
"'getent passwd admin@%s'!" % cli_domain)
|
2012-02-23 17:24:46 +01:00
|
|
|
if conf:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("Recognized configuration: %s", conf)
|
2012-02-23 17:24:46 +01:00
|
|
|
else:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Unable to reliably detect " +
|
|
|
|
|
"configuration. Check NSS setup manually.")
|
2012-02-23 17:24:46 +01:00
|
|
|
|
|
|
|
|
try:
|
|
|
|
|
hardcode_ldap_server(cli_server)
|
2015-07-30 16:49:29 +02:00
|
|
|
except Exception as e:
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.error("Adding hardcoded server name to " +
|
|
|
|
|
"/etc/ldap.conf failed: %s", str(e))
|
2008-02-20 10:16:19 -05:00
|
|
|
|
|
|
|
|
if options.conf_ntp and not options.on_master:
|
2012-12-07 16:44:32 +01:00
|
|
|
# disable other time&date services first
|
|
|
|
|
if options.force_ntpd:
|
|
|
|
|
ipaclient.ntpconf.force_ntpd(statestore)
|
2015-04-15 14:32:17 +02:00
|
|
|
|
2015-04-15 15:06:45 +02:00
|
|
|
if options.ntp_servers:
|
2015-04-14 18:56:47 +02:00
|
|
|
ntp_servers = options.ntp_servers
|
2015-04-15 14:32:17 +02:00
|
|
|
elif ntp_srv_servers:
|
|
|
|
|
ntp_servers = ntp_srv_servers
|
2008-03-14 08:42:06 -04:00
|
|
|
else:
|
2015-04-15 14:32:17 +02:00
|
|
|
root_logger.warning("No SRV records of NTP servers found. IPA "
|
|
|
|
|
"server address will be used")
|
2015-04-14 18:56:47 +02:00
|
|
|
ntp_servers = cli_server
|
2015-04-15 14:32:17 +02:00
|
|
|
|
2015-04-14 18:56:47 +02:00
|
|
|
ipaclient.ntpconf.config_ntp(ntp_servers, fstore, statestore)
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info("NTP enabled")
|
|
|
|
|
|
2012-09-12 09:19:26 -04:00
|
|
|
if options.conf_ssh:
|
|
|
|
|
configure_ssh_config(fstore, options)
|
|
|
|
|
|
|
|
|
|
if options.conf_sshd:
|
|
|
|
|
configure_sshd_config(fstore, options)
|
2011-12-07 03:49:09 -05:00
|
|
|
|
2013-08-30 16:05:01 +02:00
|
|
|
if options.location:
|
|
|
|
|
configure_automount(options)
|
|
|
|
|
|
2013-11-04 11:52:02 +01:00
|
|
|
if options.configure_firefox:
|
|
|
|
|
configure_firefox(options, statestore, cli_domain)
|
|
|
|
|
|
2013-09-25 13:45:45 +02:00
|
|
|
if not options.no_nisdomain:
|
|
|
|
|
configure_nisdomain(options=options, domain=cli_domain)
|
|
|
|
|
|
2012-06-08 09:36:38 -04:00
|
|
|
root_logger.info('Client configuration complete.')
|
2008-01-17 16:36:05 -05:00
|
|
|
|
2007-08-16 18:00:16 -04:00
|
|
|
return 0
|
|
|
|
|
|
2011-08-29 17:44:02 -04:00
|
|
|
def main():
    """Entry point for ipa-client-install.

    Parses options, performs pre-flight checks (root privileges, SELinux),
    sets up logging and the sysrestore stores, then dispatches to either
    uninstall() or install().  On a failed install, changes are rolled back
    unless --force was given or we are running on an IPA master.

    Returns the process exit code (0 on success, CLIENT_INSTALL_ERROR /
    CLIENT_ALREADY_CONFIGURED on failure).
    """
    safe_options, options = parse_options()

    # Installation touches system configuration; require root.
    if os.getegid() != 0:
        sys.exit("\nYou must be root to run ipa-client-install.\n")
    tasks.check_selinux_status()
    logging_setup(options)
    # safe_options has sensitive values (passwords) stripped by parse_options.
    root_logger.debug(
        '%s was invoked with options: %s', sys.argv[0], safe_options)
    root_logger.debug("missing options might be asked for interactively later")
    root_logger.debug('IPA version %s', version.VENDOR_VERSION)

    # Sanitized PATH handed to every subprocess we spawn.
    env={"PATH":"/bin:/sbin:/usr/kerberos/bin:/usr/kerberos/sbin:/usr/bin:/usr/sbin"}

    # File backup store, shared with uninstall() for rollback.
    global fstore
    fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)

    # Service/state store, shared with uninstall() for rollback.
    global statestore
    statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)

    if options.uninstall:
        return uninstall(options, env)

    if is_ipa_client_installed(on_master=options.on_master):
        root_logger.error("IPA client is already configured on this system.")
        root_logger.info(
            "If you want to reinstall the IPA client, uninstall it first " +
            "using 'ipa-client-install --uninstall'.")
        return CLIENT_ALREADY_CONFIGURED

    rval = install(options, env, fstore, statestore)
    if rval == CLIENT_INSTALL_ERROR:
        if options.force:
            root_logger.warning(
                "Installation failed. Force set so not rolling back changes.")
        elif options.on_master:
            # Rolling back on a master would damage the server's own config.
            root_logger.warning(
                "Installation failed. As this is IPA server, changes will not "
                "be rolled back."
            )
        else:
            root_logger.error("Installation failed. Rolling back changes.")
            # Roll back non-interactively; the user already saw the failure.
            options.unattended = True
            uninstall(options, env)

    return rval
|
|
|
|
|
|
2008-02-26 15:31:34 -05:00
|
|
|
# Script entry point: run main() and translate its return code into the
# process exit status, then always try to clean up the temporary ccache.
try:
    if __name__ == "__main__":
        sys.exit(main())
except (SystemExit, RuntimeError) as e:
    # SystemExit carries main()'s return code; RuntimeError's message
    # becomes the error printed on stderr with exit status 1.
    sys.exit(e)
except KeyboardInterrupt:
    # Ctrl-C: exit quietly with a failure status.
    sys.exit(1)
finally:
    # Best-effort removal of the temporary credential cache; it may
    # legitimately not exist, so ignore any failure.
    try:
        os.remove(CCACHE_FILE)
    except Exception:
        pass
|