2014-10-03 04:14:56 -05:00
|
|
|
# Authors: Simo Sorce <ssorce@redhat.com>
|
|
|
|
# Alexander Bokovoy <abokovoy@redhat.com>
|
|
|
|
# Martin Kosek <mkosek@redhat.com>
|
|
|
|
# Tomas Babej <tbabej@redhat.com>
|
|
|
|
#
|
|
|
|
# Copyright (C) 2007-2014 Red Hat
|
|
|
|
# see file 'COPYING' for use and warranty information
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
'''
|
|
|
|
This module contains default Red Hat OS family-specific implementations of
|
|
|
|
system tasks.
|
|
|
|
'''
|
2018-04-05 02:21:16 -05:00
|
|
|
from __future__ import print_function, absolute_import
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2018-04-04 10:30:37 -05:00
|
|
|
import ctypes
|
2017-05-25 05:42:54 -05:00
|
|
|
import logging
|
2014-10-03 04:14:56 -05:00
|
|
|
import os
|
|
|
|
import socket
|
2016-04-19 11:36:32 -05:00
|
|
|
import traceback
|
2016-06-30 02:15:45 -05:00
|
|
|
import errno
|
2018-09-27 00:47:07 -05:00
|
|
|
import urllib
|
2018-12-12 10:32:06 -06:00
|
|
|
import subprocess
|
2018-02-06 03:05:49 -06:00
|
|
|
import sys
|
2019-04-05 06:39:13 -05:00
|
|
|
import textwrap
|
2016-04-19 11:36:32 -05:00
|
|
|
|
2016-01-11 09:22:40 -06:00
|
|
|
from ctypes.util import find_library
|
2015-12-09 11:53:35 -06:00
|
|
|
from functools import total_ordering
|
2014-10-03 04:14:56 -05:00
|
|
|
from subprocess import CalledProcessError
|
2017-03-01 04:19:08 -06:00
|
|
|
|
2014-10-03 04:14:56 -05:00
|
|
|
from pyasn1.error import PyAsn1Error
|
|
|
|
|
2018-05-23 03:37:58 -05:00
|
|
|
from ipapython import directivesetter
|
2014-10-03 04:14:56 -05:00
|
|
|
from ipapython import ipautil
|
|
|
|
import ipapython.errors
|
|
|
|
|
2016-01-19 07:18:30 -06:00
|
|
|
from ipaplatform.constants import constants
|
2014-10-03 04:14:56 -05:00
|
|
|
from ipaplatform.paths import paths
|
2018-04-26 09:51:42 -05:00
|
|
|
from ipaplatform.redhat.authconfig import get_auth_tool
|
2014-10-03 04:14:56 -05:00
|
|
|
from ipaplatform.base.tasks import BaseTaskNamespace
|
|
|
|
|
2017-05-25 05:42:54 -05:00
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2019-04-24 06:13:45 -05:00
|
|
|
# /etc/pkcs11/modules override
|
|
|
|
# base file name, module, list of disabled-in
|
|
|
|
# 'p11-kit-proxy' disables proxying of module, see man(5) pkcs11.conf
|
|
|
|
PKCS11_MODULES = [
|
|
|
|
('softhsm2', paths.LIBSOFTHSM2_SO, ['p11-kit-proxy']),
|
|
|
|
]
|
|
|
|
|
|
|
|
|
2019-04-05 06:39:13 -05:00
|
|
|
NM_IPA_CONF = textwrap.dedent("""
|
|
|
|
# auto-generated by IPA installer
|
|
|
|
[main]
|
|
|
|
dns=default
|
|
|
|
|
|
|
|
[global-dns]
|
|
|
|
searches={searches}
|
|
|
|
|
|
|
|
[global-dns-domain-*]
|
|
|
|
servers={servers}
|
|
|
|
""")
|
|
|
|
|
|
|
|
|
2015-12-09 11:53:35 -06:00
|
|
|
@total_ordering
class IPAVersion:
    """Version string comparable with RPM semantics.

    Ordering is delegated to librpm's ``rpmvercmp`` so that comparisons
    agree with what rpm itself would compute for package versions.
    """

    # Cached ctypes handle to librpm's rpmvercmp(); loaded on first use.
    _rpmvercmp_func = None

    @classmethod
    def _rpmvercmp(cls, a, b):
        """Lazy-load librpm and compare two byte strings with rpmvercmp."""
        func = cls._rpmvercmp_func
        if func is None:
            librpm = ctypes.CDLL(find_library('rpm'))
            func = librpm.rpmvercmp
            # int rpmvercmp(const char *a, const char *b)
            func.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
            func.restype = ctypes.c_int
            cls._rpmvercmp_func = func
        return func(a, b)

    def __init__(self, version):
        # Keep both the text form (for hashing / the property) and the
        # UTF-8 bytes that librpm expects.
        self._version = version
        self._bytes = version.encode('utf-8')

    @property
    def version(self):
        """The original version string."""
        return self._version

    def _compare(self, other):
        """Return the rpmvercmp result against *other*, or None if the
        operand is not an IPAVersion (so dunders can defer)."""
        if not isinstance(other, IPAVersion):
            return None
        return self._rpmvercmp(self._bytes, other._bytes)

    def __eq__(self, other):
        result = self._compare(other)
        if result is None:
            return NotImplemented
        return result == 0

    def __lt__(self, other):
        # total_ordering fills in the remaining comparison operators.
        result = self._compare(other)
        if result is None:
            return NotImplemented
        return result < 0

    def __hash__(self):
        # Hash on the string so equal-by-text versions collide as expected.
        return hash(self._version)
|
|
|
|
|
2015-12-09 11:53:35 -06:00
|
|
|
|
2014-10-03 04:14:56 -05:00
|
|
|
class RedHatTaskNamespace(BaseTaskNamespace):
|
|
|
|
|
2018-02-08 09:57:11 -06:00
|
|
|
def restore_context(self, filepath, force=False):
|
|
|
|
"""Restore SELinux security context on the given filepath.
|
|
|
|
|
2014-10-03 04:14:56 -05:00
|
|
|
SELinux equivalent is /path/to/restorecon <filepath>
|
|
|
|
restorecon's return values are not reliable so we have to
|
|
|
|
ignore them (BZ #739604).
|
|
|
|
|
|
|
|
ipautil.run() will do the logging.
|
|
|
|
"""
|
2018-02-08 09:57:11 -06:00
|
|
|
restorecon = paths.SBIN_RESTORECON
|
2019-04-25 06:24:48 -05:00
|
|
|
if not self.is_selinux_enabled() or not os.path.exists(restorecon):
|
2014-10-03 04:14:56 -05:00
|
|
|
return
|
|
|
|
|
2018-02-08 09:57:11 -06:00
|
|
|
# Force reset of context to match file_context for customizable
|
|
|
|
# files, and the default file context, changing the user, role,
|
|
|
|
# range portion as well as the type.
|
|
|
|
args = [restorecon]
|
|
|
|
if force:
|
|
|
|
args.append('-F')
|
|
|
|
args.append(filepath)
|
|
|
|
ipautil.run(args, raiseonerr=False)
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2019-04-25 06:24:48 -05:00
|
|
|
def is_selinux_enabled(self):
|
|
|
|
"""Check if SELinux is available and enabled
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
ipautil.run([paths.SELINUXENABLED])
|
|
|
|
except ipautil.CalledProcessError:
|
|
|
|
# selinuxenabled returns 1 if not enabled
|
|
|
|
return False
|
|
|
|
except OSError:
|
|
|
|
# selinuxenabled binary not available
|
|
|
|
return False
|
|
|
|
else:
|
|
|
|
return True
|
|
|
|
|
2014-10-03 04:14:56 -05:00
|
|
|
def check_selinux_status(self, restorecon=paths.RESTORECON):
|
|
|
|
"""
|
|
|
|
We don't have a specific package requirement for policycoreutils
|
|
|
|
which provides restorecon. This is because we don't require
|
|
|
|
SELinux on client installs. However if SELinux is enabled then
|
|
|
|
this package is required.
|
|
|
|
|
|
|
|
This function returns nothing but may raise a Runtime exception
|
|
|
|
if SELinux is enabled but restorecon is not available.
|
|
|
|
"""
|
2019-04-25 06:24:48 -05:00
|
|
|
if not self.is_selinux_enabled():
|
|
|
|
return False
|
2014-10-03 04:14:56 -05:00
|
|
|
|
|
|
|
if not os.path.exists(restorecon):
|
|
|
|
raise RuntimeError('SELinux is enabled but %s does not exist.\n'
|
|
|
|
'Install the policycoreutils package and start '
|
|
|
|
'the installation again.' % restorecon)
|
2019-04-25 06:24:48 -05:00
|
|
|
return True
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2017-03-07 06:54:41 -06:00
|
|
|
    def check_ipv6_stack_enabled(self):
        """Checks whether IPv6 kernel module is loaded.

        Function checks if /proc/net/if_inet6 is present. If IPv6 stack is
        enabled, it exists and contains the interfaces configuration.

        :raises: RuntimeError when IPv6 stack is disabled
        """
        # Presence of the procfs entry is the kernel's signal that the
        # IPv6 stack is compiled in and enabled.
        if not os.path.exists(paths.IF_INET6):
            raise RuntimeError(
                "IPv6 stack has to be enabled in the kernel and some "
                "interface has to have ::1 address assigned. Typically "
                "this is 'lo' interface. If you do not wish to use IPv6 "
                "globally, disable it on the specific interfaces in "
                "sysctl.conf except 'lo' interface.")

        # XXX This is a hack to work around an issue with Travis CI by
        # skipping IPv6 address test. The Dec 2017 update removed ::1 from
        # loopback, see https://github.com/travis-ci/travis-ci/issues/8891.
        if os.environ.get('TRAVIS') == 'true':
            return

        # Stack is up; additionally require that ::1 is actually assigned
        # to some interface (normally 'lo').
        try:
            localhost6 = ipautil.CheckedIPAddress('::1', allow_loopback=True)
            if localhost6.get_matching_interface() is None:
                raise ValueError("no interface for ::1 address found")
        except ValueError:
            raise RuntimeError(
                "IPv6 stack is enabled in the kernel but there is no "
                "interface that has ::1 address assigned. Add ::1 address "
                "resolution to 'lo' interface. You might need to enable IPv6 "
                "on the interface 'lo' in sysctl.conf.")
|
2017-03-07 06:54:41 -06:00
|
|
|
|
2018-12-12 10:32:06 -06:00
|
|
|
def detect_container(self):
|
|
|
|
"""Check if running inside a container
|
|
|
|
|
|
|
|
:returns: container runtime or None
|
|
|
|
:rtype: str, None
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
output = subprocess.check_output(
|
|
|
|
[paths.SYSTEMD_DETECT_VIRT, '--container'],
|
|
|
|
stderr=subprocess.STDOUT
|
|
|
|
)
|
|
|
|
except subprocess.CalledProcessError as e:
|
|
|
|
if e.returncode == 1:
|
|
|
|
# No container runtime detected
|
|
|
|
return None
|
|
|
|
else:
|
|
|
|
raise
|
|
|
|
else:
|
|
|
|
return output.decode('utf-8').strip()
|
|
|
|
|
2014-10-03 04:14:56 -05:00
|
|
|
def restore_pre_ipa_client_configuration(self, fstore, statestore,
|
|
|
|
was_sssd_installed,
|
|
|
|
was_sssd_configured):
|
|
|
|
|
2018-04-26 09:51:42 -05:00
|
|
|
auth_config = get_auth_tool()
|
|
|
|
auth_config.unconfigure(
|
|
|
|
fstore, statestore, was_sssd_installed, was_sssd_configured
|
|
|
|
)
|
2014-10-03 04:14:56 -05:00
|
|
|
|
|
|
|
def set_nisdomain(self, nisdomain):
|
2018-04-26 09:51:42 -05:00
|
|
|
try:
|
|
|
|
with open(paths.SYSCONF_NETWORK, 'r') as f:
|
|
|
|
content = [
|
|
|
|
line for line in f
|
|
|
|
if not line.strip().upper().startswith('NISDOMAIN')
|
|
|
|
]
|
|
|
|
except IOError:
|
|
|
|
content = []
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2018-04-26 09:51:42 -05:00
|
|
|
content.append("NISDOMAIN={}\n".format(nisdomain))
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2018-04-26 09:51:42 -05:00
|
|
|
with open(paths.SYSCONF_NETWORK, 'w') as f:
|
|
|
|
f.writelines(content)
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2018-06-18 06:27:41 -05:00
|
|
|
def modify_nsswitch_pam_stack(self, sssd, mkhomedir, statestore,
|
|
|
|
sudo=True):
|
2018-04-26 09:51:42 -05:00
|
|
|
auth_config = get_auth_tool()
|
2018-06-18 06:27:41 -05:00
|
|
|
auth_config.configure(sssd, mkhomedir, statestore, sudo)
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2018-04-26 09:51:42 -05:00
|
|
|
def is_nosssd_supported(self):
|
|
|
|
# The flag --no-sssd is not supported any more for rhel-based distros
|
|
|
|
return False
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2015-08-19 01:10:03 -05:00
|
|
|
def backup_auth_configuration(self, path):
|
2018-04-26 09:51:42 -05:00
|
|
|
auth_config = get_auth_tool()
|
2015-08-19 01:10:03 -05:00
|
|
|
auth_config.backup(path)
|
|
|
|
|
|
|
|
def restore_auth_configuration(self, path):
|
2018-04-26 09:51:42 -05:00
|
|
|
auth_config = get_auth_tool()
|
2015-08-19 01:10:03 -05:00
|
|
|
auth_config.restore(path)
|
|
|
|
|
2018-04-26 09:51:42 -05:00
|
|
|
def migrate_auth_configuration(self, statestore):
|
|
|
|
"""
|
|
|
|
Migrate the pam stack configuration from authconfig to an authselect
|
|
|
|
profile.
|
|
|
|
"""
|
|
|
|
# Check if mkhomedir was enabled during installation
|
|
|
|
mkhomedir = statestore.get_state('authconfig', 'mkhomedir')
|
|
|
|
|
|
|
|
# Force authselect 'sssd' profile
|
2018-06-18 06:27:41 -05:00
|
|
|
authselect_cmd = [paths.AUTHSELECT, "select", "sssd", "with-sudo"]
|
2018-04-26 09:51:42 -05:00
|
|
|
if mkhomedir:
|
|
|
|
authselect_cmd.append("with-mkhomedir")
|
|
|
|
authselect_cmd.append("--force")
|
|
|
|
ipautil.run(authselect_cmd)
|
|
|
|
|
|
|
|
# Remove all remaining keys from the authconfig module
|
|
|
|
for conf in ('ldap', 'krb5', 'sssd', 'sssdauth', 'mkhomedir'):
|
|
|
|
statestore.restore_state('authconfig', conf)
|
|
|
|
|
|
|
|
# Create new authselect module in the statestore
|
|
|
|
statestore.backup_state('authselect', 'profile', 'sssd')
|
|
|
|
statestore.backup_state(
|
|
|
|
'authselect', 'features_list', '')
|
|
|
|
statestore.backup_state('authselect', 'mkhomedir', bool(mkhomedir))
|
|
|
|
|
2014-11-10 10:24:22 -06:00
|
|
|
def reload_systemwide_ca_store(self):
|
|
|
|
try:
|
|
|
|
ipautil.run([paths.UPDATE_CA_TRUST])
|
2015-07-30 09:49:29 -05:00
|
|
|
except CalledProcessError as e:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.error(
|
2014-11-10 10:24:22 -06:00
|
|
|
"Could not update systemwide CA trust database: %s", e)
|
|
|
|
return False
|
|
|
|
else:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.info("Systemwide CA database updated.")
|
2014-11-10 10:24:22 -06:00
|
|
|
return True
|
|
|
|
|
2014-10-03 04:14:56 -05:00
|
|
|
def insert_ca_certs_into_systemwide_ca_store(self, ca_certs):
|
2017-02-14 02:58:44 -06:00
|
|
|
# pylint: disable=ipa-forbidden-import
|
2016-11-18 08:42:23 -06:00
|
|
|
from ipalib import x509 # FixMe: break import cycle
|
2017-03-01 04:19:08 -06:00
|
|
|
from ipalib.errors import CertificateError
|
2017-02-14 02:58:44 -06:00
|
|
|
# pylint: enable=ipa-forbidden-import
|
2016-11-18 08:42:23 -06:00
|
|
|
|
2014-11-10 10:24:22 -06:00
|
|
|
new_cacert_path = paths.SYSTEMWIDE_IPA_CA_CRT
|
2014-10-03 04:14:56 -05:00
|
|
|
|
|
|
|
if os.path.exists(new_cacert_path):
|
|
|
|
try:
|
|
|
|
os.remove(new_cacert_path)
|
2015-07-30 09:49:29 -05:00
|
|
|
except OSError as e:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.error(
|
2014-10-03 04:14:56 -05:00
|
|
|
"Could not remove %s: %s", new_cacert_path, e)
|
|
|
|
return False
|
|
|
|
|
|
|
|
new_cacert_path = paths.IPA_P11_KIT
|
|
|
|
|
|
|
|
try:
|
|
|
|
f = open(new_cacert_path, 'w')
|
2018-06-22 05:22:06 -05:00
|
|
|
os.fchmod(f.fileno(), 0o644)
|
2015-07-30 09:49:29 -05:00
|
|
|
except IOError as e:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.info("Failed to open %s: %s", new_cacert_path, e)
|
2014-10-03 04:14:56 -05:00
|
|
|
return False
|
|
|
|
|
|
|
|
f.write("# This file was created by IPA. Do not edit.\n"
|
|
|
|
"\n")
|
|
|
|
|
|
|
|
has_eku = set()
|
2017-09-18 09:28:10 -05:00
|
|
|
for cert, nickname, trusted, _ext_key_usage in ca_certs:
|
2014-10-03 04:14:56 -05:00
|
|
|
try:
|
2017-06-16 03:18:07 -05:00
|
|
|
subject = cert.subject_bytes
|
|
|
|
issuer = cert.issuer_bytes
|
2017-10-16 06:29:07 -05:00
|
|
|
serial_number = cert.serial_number_bytes
|
2017-06-16 03:18:07 -05:00
|
|
|
public_key_info = cert.public_key_info_bytes
|
2017-03-01 04:19:08 -06:00
|
|
|
except (PyAsn1Error, ValueError, CertificateError) as e:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.warning(
|
2014-10-03 04:14:56 -05:00
|
|
|
"Failed to decode certificate \"%s\": %s", nickname, e)
|
|
|
|
continue
|
|
|
|
|
2015-09-14 05:52:29 -05:00
|
|
|
label = urllib.parse.quote(nickname)
|
|
|
|
subject = urllib.parse.quote(subject)
|
|
|
|
issuer = urllib.parse.quote(issuer)
|
2017-10-16 06:29:07 -05:00
|
|
|
serial_number = urllib.parse.quote(serial_number)
|
2015-09-14 05:52:29 -05:00
|
|
|
public_key_info = urllib.parse.quote(public_key_info)
|
2014-10-03 04:14:56 -05:00
|
|
|
|
|
|
|
obj = ("[p11-kit-object-v1]\n"
|
|
|
|
"class: certificate\n"
|
|
|
|
"certificate-type: x-509\n"
|
|
|
|
"certificate-category: authority\n"
|
|
|
|
"label: \"%(label)s\"\n"
|
|
|
|
"subject: \"%(subject)s\"\n"
|
|
|
|
"issuer: \"%(issuer)s\"\n"
|
|
|
|
"serial-number: \"%(serial_number)s\"\n"
|
|
|
|
"x-public-key-info: \"%(public_key_info)s\"\n" %
|
|
|
|
dict(label=label,
|
|
|
|
subject=subject,
|
|
|
|
issuer=issuer,
|
|
|
|
serial_number=serial_number,
|
|
|
|
public_key_info=public_key_info))
|
|
|
|
if trusted is True:
|
|
|
|
obj += "trusted: true\n"
|
|
|
|
elif trusted is False:
|
|
|
|
obj += "x-distrusted: true\n"
|
2017-07-27 08:37:17 -05:00
|
|
|
obj += "{pem}\n\n".format(
|
|
|
|
pem=cert.public_bytes(x509.Encoding.PEM).decode('ascii'))
|
2014-10-03 04:14:56 -05:00
|
|
|
f.write(obj)
|
|
|
|
|
2017-09-18 09:28:10 -05:00
|
|
|
if (cert.extended_key_usage is not None and
|
|
|
|
public_key_info not in has_eku):
|
2014-10-03 04:14:56 -05:00
|
|
|
try:
|
2017-06-16 03:18:07 -05:00
|
|
|
ext_key_usage = cert.extended_key_usage_bytes
|
2015-07-30 09:49:29 -05:00
|
|
|
except PyAsn1Error as e:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.warning(
|
2014-10-03 04:14:56 -05:00
|
|
|
"Failed to encode extended key usage for \"%s\": %s",
|
|
|
|
nickname, e)
|
|
|
|
continue
|
2015-09-14 05:52:29 -05:00
|
|
|
value = urllib.parse.quote(ext_key_usage)
|
2014-10-03 04:14:56 -05:00
|
|
|
obj = ("[p11-kit-object-v1]\n"
|
|
|
|
"class: x-certificate-extension\n"
|
|
|
|
"label: \"ExtendedKeyUsage for %(label)s\"\n"
|
|
|
|
"x-public-key-info: \"%(public_key_info)s\"\n"
|
|
|
|
"object-id: 2.5.29.37\n"
|
|
|
|
"value: \"%(value)s\"\n\n" %
|
|
|
|
dict(label=label,
|
|
|
|
public_key_info=public_key_info,
|
|
|
|
value=value))
|
|
|
|
f.write(obj)
|
|
|
|
has_eku.add(public_key_info)
|
|
|
|
|
|
|
|
f.close()
|
|
|
|
|
|
|
|
# Add the CA to the systemwide CA trust database
|
2014-11-10 10:24:22 -06:00
|
|
|
if not self.reload_systemwide_ca_store():
|
|
|
|
return False
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2014-11-10 10:24:22 -06:00
|
|
|
return True
|
2014-10-03 04:14:56 -05:00
|
|
|
|
|
|
|
def remove_ca_certs_from_systemwide_ca_store(self):
|
2014-11-10 10:24:22 -06:00
|
|
|
result = True
|
2014-10-03 04:14:56 -05:00
|
|
|
update = False
|
|
|
|
|
|
|
|
# Remove CA cert from systemwide store
|
2014-11-10 10:24:22 -06:00
|
|
|
for new_cacert_path in (paths.IPA_P11_KIT,
|
|
|
|
paths.SYSTEMWIDE_IPA_CA_CRT):
|
2014-10-03 04:14:56 -05:00
|
|
|
if not os.path.exists(new_cacert_path):
|
|
|
|
continue
|
|
|
|
try:
|
|
|
|
os.remove(new_cacert_path)
|
2015-07-30 09:49:29 -05:00
|
|
|
except OSError as e:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.error(
|
2014-10-03 04:14:56 -05:00
|
|
|
"Could not remove %s: %s", new_cacert_path, e)
|
2014-11-10 10:24:22 -06:00
|
|
|
result = False
|
2014-10-03 04:14:56 -05:00
|
|
|
else:
|
|
|
|
update = True
|
|
|
|
|
|
|
|
if update:
|
2014-11-10 10:24:22 -06:00
|
|
|
if not self.reload_systemwide_ca_store():
|
2014-10-03 04:14:56 -05:00
|
|
|
return False
|
|
|
|
|
2014-11-10 10:24:22 -06:00
|
|
|
return result
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2016-07-28 09:13:55 -05:00
|
|
|
def backup_hostname(self, fstore, statestore):
|
2014-10-03 04:14:56 -05:00
|
|
|
filepath = paths.ETC_HOSTNAME
|
|
|
|
if os.path.exists(filepath):
|
|
|
|
fstore.backup_file(filepath)
|
|
|
|
|
|
|
|
# store old hostname
|
2016-07-28 09:13:55 -05:00
|
|
|
old_hostname = socket.gethostname()
|
2014-10-03 04:14:56 -05:00
|
|
|
statestore.backup_state('network', 'hostname', old_hostname)
|
|
|
|
|
2016-04-19 11:36:32 -05:00
|
|
|
def restore_hostname(self, fstore, statestore):
|
2018-07-06 05:47:34 -05:00
|
|
|
old_hostname = statestore.restore_state('network', 'hostname')
|
2016-04-19 11:36:32 -05:00
|
|
|
|
|
|
|
if old_hostname is not None:
|
|
|
|
try:
|
|
|
|
self.set_hostname(old_hostname)
|
|
|
|
except ipautil.CalledProcessError as e:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.debug("%s", traceback.format_exc())
|
|
|
|
logger.error(
|
2016-04-19 11:36:32 -05:00
|
|
|
"Failed to restore this machine hostname to %s (%s).",
|
|
|
|
old_hostname, e
|
|
|
|
)
|
2014-10-03 04:14:56 -05:00
|
|
|
|
|
|
|
filepath = paths.ETC_HOSTNAME
|
|
|
|
if fstore.has_file(filepath):
|
|
|
|
fstore.restore_file(filepath)
|
|
|
|
|
|
|
|
    def set_selinux_booleans(self, required_settings, backup_func=None):
        """Set SELinux booleans persistently (setsebool -P).

        :param required_settings: dict of {boolean_name: desired_state};
            entries with a None state are skipped
        :param backup_func: optional callable(setting, original_state)
            invoked with each boolean's current value before any change
        :returns: False when SELinux is disabled, True otherwise
        :raises ipapython.errors.SetseboolError: if reading or setting
            any boolean failed
        """
        def get_setsebool_args(changes):
            # Build: setsebool -P name1=state1 name2=state2 ...
            args = [paths.SETSEBOOL, "-P"]
            args.extend(["%s=%s" % update for update in changes.items()])

            return args

        if not self.is_selinux_enabled():
            return False

        # Booleans that differ from the desired state and must be changed.
        updated_vars = {}
        # Booleans we could not read or set; reported via SetseboolError.
        failed_vars = {}
        for setting, state in required_settings.items():
            if state is None:
                continue
            try:
                result = ipautil.run(
                    [paths.GETSEBOOL, setting],
                    capture_output=True
                )
                # getsebool output: "<name> --> <on|off>"; field 2 is state.
                original_state = result.output.split()[2]
                if backup_func is not None:
                    backup_func(setting, original_state)

                if original_state != state:
                    updated_vars[setting] = state
            except ipautil.CalledProcessError as e:
                logger.error("Cannot get SELinux boolean '%s': %s", setting, e)
                failed_vars[setting] = state

        if updated_vars:
            args = get_setsebool_args(updated_vars)
            try:
                ipautil.run(args)
            except ipautil.CalledProcessError:
                # A single setsebool run sets all booleans; on failure
                # treat every attempted update as failed.
                failed_vars.update(updated_vars)

        if failed_vars:
            raise ipapython.errors.SetseboolError(
                failed=failed_vars,
                command=' '.join(get_setsebool_args(failed_vars)))

        return True
|
|
|
|
|
2015-12-09 11:53:35 -06:00
|
|
|
def parse_ipa_version(self, version):
|
|
|
|
"""
|
|
|
|
:param version: textual version
|
|
|
|
:return: object implementing proper __cmp__ method for version compare
|
|
|
|
"""
|
|
|
|
return IPAVersion(version)
|
|
|
|
|
2016-03-16 03:04:42 -05:00
|
|
|
def configure_httpd_service_ipa_conf(self):
|
|
|
|
"""Create systemd config for httpd service to work with IPA
|
|
|
|
"""
|
|
|
|
if not os.path.exists(paths.SYSTEMD_SYSTEM_HTTPD_D_DIR):
|
|
|
|
os.mkdir(paths.SYSTEMD_SYSTEM_HTTPD_D_DIR, 0o755)
|
|
|
|
|
|
|
|
ipautil.copy_template_file(
|
2016-11-22 09:06:45 -06:00
|
|
|
os.path.join(paths.USR_SHARE_IPA_DIR, 'ipa-httpd.conf.template'),
|
2016-03-16 03:04:42 -05:00
|
|
|
paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF,
|
|
|
|
dict(
|
|
|
|
KDCPROXY_CONFIG=paths.KDCPROXY_CONFIG,
|
|
|
|
IPA_HTTPD_KDCPROXY=paths.IPA_HTTPD_KDCPROXY,
|
2017-02-15 03:44:59 -06:00
|
|
|
KRB5CC_HTTPD=paths.KRB5CC_HTTPD,
|
2016-03-16 03:04:42 -05:00
|
|
|
)
|
|
|
|
)
|
|
|
|
|
|
|
|
os.chmod(paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF, 0o644)
|
|
|
|
self.restore_context(paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF)
|
2019-04-02 09:13:05 -05:00
|
|
|
self.systemd_daemon_reload()
|
2016-03-16 03:04:42 -05:00
|
|
|
|
2019-04-02 09:13:05 -05:00
|
|
|
def systemd_daemon_reload(self):
|
|
|
|
"""Tell systemd to reload config files"""
|
|
|
|
ipautil.run([paths.SYSTEMCTL, "--system", "daemon-reload"])
|
2017-03-16 07:51:29 -05:00
|
|
|
|
2017-08-30 07:25:58 -05:00
|
|
|
def configure_http_gssproxy_conf(self, ipaapi_user):
|
2016-11-29 10:10:22 -06:00
|
|
|
ipautil.copy_template_file(
|
|
|
|
os.path.join(paths.USR_SHARE_IPA_DIR, 'gssproxy.conf.template'),
|
|
|
|
paths.GSSPROXY_CONF,
|
|
|
|
dict(
|
|
|
|
HTTP_KEYTAB=paths.HTTP_KEYTAB,
|
|
|
|
HTTP_CCACHE=paths.HTTP_CCACHE,
|
2016-08-16 08:03:19 -05:00
|
|
|
HTTPD_USER=constants.HTTPD_USER,
|
2017-08-30 07:25:58 -05:00
|
|
|
IPAAPI_USER=ipaapi_user,
|
2016-11-29 10:10:22 -06:00
|
|
|
)
|
|
|
|
)
|
|
|
|
|
|
|
|
os.chmod(paths.GSSPROXY_CONF, 0o600)
|
|
|
|
self.restore_context(paths.GSSPROXY_CONF)
|
|
|
|
|
2018-02-06 03:05:49 -06:00
|
|
|
def configure_httpd_wsgi_conf(self):
|
|
|
|
"""Configure WSGI for correct Python version (Fedora)
|
|
|
|
|
|
|
|
See https://pagure.io/freeipa/issue/7394
|
|
|
|
"""
|
|
|
|
conf = paths.HTTPD_IPA_WSGI_MODULES_CONF
|
|
|
|
if sys.version_info.major == 2:
|
|
|
|
wsgi_module = constants.MOD_WSGI_PYTHON2
|
|
|
|
else:
|
|
|
|
wsgi_module = constants.MOD_WSGI_PYTHON3
|
|
|
|
|
|
|
|
if conf is None or wsgi_module is None:
|
|
|
|
logger.info("Nothing to do for configure_httpd_wsgi_conf")
|
|
|
|
return
|
|
|
|
|
|
|
|
confdir = os.path.dirname(conf)
|
|
|
|
if not os.path.isdir(confdir):
|
|
|
|
os.makedirs(confdir)
|
|
|
|
|
|
|
|
ipautil.copy_template_file(
|
|
|
|
os.path.join(
|
|
|
|
paths.USR_SHARE_IPA_DIR, 'ipa-httpd-wsgi.conf.template'
|
|
|
|
),
|
|
|
|
conf,
|
|
|
|
dict(WSGI_MODULE=wsgi_module)
|
|
|
|
)
|
|
|
|
|
|
|
|
os.chmod(conf, 0o644)
|
|
|
|
self.restore_context(conf)
|
|
|
|
|
2016-03-16 03:04:42 -05:00
|
|
|
def remove_httpd_service_ipa_conf(self):
|
|
|
|
"""Remove systemd config for httpd service of IPA"""
|
|
|
|
try:
|
|
|
|
os.unlink(paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF)
|
|
|
|
except OSError as e:
|
2016-06-30 02:15:45 -05:00
|
|
|
if e.errno == errno.ENOENT:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.debug(
|
2016-06-30 02:15:45 -05:00
|
|
|
'Trying to remove %s but file does not exist',
|
|
|
|
paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF
|
|
|
|
)
|
|
|
|
else:
|
2017-05-25 05:42:54 -05:00
|
|
|
logger.error(
|
2016-06-30 02:15:45 -05:00
|
|
|
'Error removing %s: %s',
|
|
|
|
paths.SYSTEMD_SYSTEM_HTTPD_IPA_CONF, e
|
|
|
|
)
|
2017-03-16 07:51:29 -05:00
|
|
|
return
|
|
|
|
|
2019-04-02 09:13:05 -05:00
|
|
|
self.systemd_daemon_reload()
|
2014-10-03 04:14:56 -05:00
|
|
|
|
2019-05-23 09:45:26 -05:00
|
|
|
def configure_httpd_protocol(self):
|
2019-07-01 03:41:23 -05:00
|
|
|
# TLS 1.3 is not yet supported
|
2019-05-23 09:45:26 -05:00
|
|
|
directivesetter.set_directive(paths.HTTPD_SSL_CONF,
|
|
|
|
'SSLProtocol',
|
2019-07-01 03:41:23 -05:00
|
|
|
'TLSv1.2', False)
|
2019-05-23 09:45:26 -05:00
|
|
|
|
2016-04-19 11:36:32 -05:00
|
|
|
def set_hostname(self, hostname):
|
|
|
|
ipautil.run([paths.BIN_HOSTNAMECTL, 'set-hostname', hostname])
|
|
|
|
|
2016-11-23 09:13:31 -06:00
|
|
|
def is_fips_enabled(self):
|
|
|
|
"""
|
|
|
|
Checks whether this host is FIPS-enabled.
|
|
|
|
|
|
|
|
Returns a boolean indicating if the host is FIPS-enabled, i.e. if the
|
|
|
|
file /proc/sys/crypto/fips_enabled contains a non-0 value. Otherwise,
|
|
|
|
or if the file /proc/sys/crypto/fips_enabled does not exist,
|
|
|
|
the function returns False.
|
|
|
|
"""
|
|
|
|
try:
|
|
|
|
with open(paths.PROC_FIPS_ENABLED, 'r') as f:
|
|
|
|
if f.read().strip() != '0':
|
|
|
|
return True
|
|
|
|
except IOError:
|
|
|
|
# Consider that the host is not fips-enabled if the file does not
|
|
|
|
# exist
|
|
|
|
pass
|
|
|
|
return False
|
|
|
|
|
2018-05-21 05:46:42 -05:00
|
|
|
def setup_httpd_logging(self):
|
2018-05-23 03:37:58 -05:00
|
|
|
directivesetter.set_directive(paths.HTTPD_SSL_CONF,
|
|
|
|
'ErrorLog',
|
|
|
|
'logs/error_log', False)
|
|
|
|
directivesetter.set_directive(paths.HTTPD_SSL_CONF,
|
|
|
|
'TransferLog',
|
|
|
|
'logs/access_log', False)
|
2018-05-21 05:46:42 -05:00
|
|
|
|
2019-04-05 06:39:13 -05:00
|
|
|
    def configure_dns_resolver(self, nameservers, searchdomains, fstore=None):
        """Configure global DNS resolver (e.g. /etc/resolv.conf)

        :param nameservers: list of IP addresses
        :param searchdomains: list of search domains
        :param fstore: optional file store for backup
        """
        assert nameservers and isinstance(nameservers, list)
        assert searchdomains and isinstance(searchdomains, list)

        # break circular import
        from ipaplatform.services import knownservices

        # Back up resolv.conf only once, so repeated calls do not
        # overwrite the pristine pre-install copy.
        if fstore is not None and not fstore.has_file(paths.RESOLV_CONF):
            fstore.backup_file(paths.RESOLV_CONF)

        nm = knownservices['NetworkManager']
        if nm.is_enabled():
            logger.debug(
                "Network Manager is enabled, write %s",
                paths.NETWORK_MANAGER_IPA_CONF
            )
            # write DNS override and reload network manager to have it create
            # a new resolv.conf. The file is prefixed with ``zzz`` to
            # make it the last file. Global dns options do not stack and last
            # man standing wins.
            cfg = NM_IPA_CONF.format(
                servers=','.join(nameservers),
                searches=','.join(searchdomains)
            )
            with open(paths.NETWORK_MANAGER_IPA_CONF, 'w') as f:
                os.fchmod(f.fileno(), 0o644)
                f.write(cfg)
            # reload NetworkManager
            nm.reload_or_restart()
        else:
            # no NM running, fall back to /etc/resolv.conf
            logger.debug(
                "Network Manager is not enabled, write %s directly.",
                paths.RESOLV_CONF
            )
            cfg = [
                "# auto-generated by IPA installer",
                "search {}".format(' '.join(searchdomains)),
            ]
            for nameserver in nameservers:
                cfg.append("nameserver {}".format(nameserver))
            with open(paths.RESOLV_CONF, 'w') as f:
                f.write('\n'.join(cfg))
|
|
|
|
|
|
|
|
def unconfigure_dns_resolver(self, fstore=None):
|
|
|
|
"""Unconfigure global DNS resolver (e.g. /etc/resolv.conf)
|
|
|
|
|
|
|
|
:param fstore: optional file store for restore
|
|
|
|
"""
|
|
|
|
# break circular import
|
|
|
|
from ipaplatform.services import knownservices
|
|
|
|
|
|
|
|
if fstore is not None and fstore.has_file(paths.RESOLV_CONF):
|
|
|
|
fstore.restore_file(paths.RESOLV_CONF)
|
|
|
|
|
|
|
|
nm = knownservices['NetworkManager']
|
|
|
|
if os.path.isfile(paths.NETWORK_MANAGER_IPA_CONF):
|
|
|
|
os.unlink(paths.NETWORK_MANAGER_IPA_CONF)
|
|
|
|
if nm.is_enabled():
|
|
|
|
nm.reload_or_restart()
|
|
|
|
|
2019-04-24 06:13:45 -05:00
|
|
|
    def configure_pkcs11_modules(self, fstore):
        """Disable global p11-kit configuration for NSS

        Writes one override file per entry in PKCS11_MODULES into
        /etc/pkcs11/modules, backing up any pre-existing file that does
        not look IPA-generated.

        :param fstore: file store used to back up pre-existing configs
        :returns: list of written module config file names
        """
        filenames = []
        for name, module, disabled_in in PKCS11_MODULES:
            filename = os.path.join(
                paths.ETC_PKCS11_MODULES_DIR,
                "{}.module".format(name)
            )
            if os.path.isfile(filename):
                # Only back up if file is not yet backed up and it does not
                # look like a file that is generated by IPA.
                with open(filename) as f:
                    content = f.read()
                is_ipa_file = "IPA" in content
                if not is_ipa_file and not fstore.has_file(filename):
                    logger.debug("Backing up existing '%s'.", filename)
                    fstore.backup_file(filename)

            with open(filename, "w") as f:
                f.write("# created by IPA installer\n")
                f.write("module: {}\n".format(module))
                # see man(5) pkcs11.conf
                f.write("disable-in: {}\n".format(", ".join(disabled_in)))
                os.fchmod(f.fileno(), 0o644)
            self.restore_context(filename)
            logger.debug("Created PKCS#11 module config '%s'.", filename)
            filenames.append(filename)

        return filenames
|
|
|
|
|
|
|
|
def restore_pkcs11_modules(self, fstore):
|
|
|
|
"""Restore global p11-kit configuration for NSS
|
|
|
|
"""
|
|
|
|
filenames = []
|
|
|
|
for name, _module, _disabled_in in PKCS11_MODULES:
|
|
|
|
filename = os.path.join(
|
|
|
|
paths.ETC_PKCS11_MODULES_DIR,
|
|
|
|
"{}.module".format(name)
|
|
|
|
)
|
|
|
|
try:
|
|
|
|
os.unlink(filename)
|
|
|
|
except OSError:
|
|
|
|
pass
|
|
|
|
else:
|
|
|
|
filenames.append(filename)
|
|
|
|
|
|
|
|
if fstore.has_file(filename):
|
|
|
|
fstore.restore_file(filename)
|
|
|
|
|
|
|
|
return filenames
|
2016-12-01 10:37:20 -06:00
|
|
|
|
2019-09-19 08:21:52 -05:00
|
|
|
def get_pkcs11_modules(self):
|
|
|
|
"""Return the list of module config files setup by IPA
|
|
|
|
"""
|
|
|
|
return tuple(os.path.join(paths.ETC_PKCS11_MODULES_DIR,
|
|
|
|
"{}.module".format(name))
|
|
|
|
for name, _module, _disabled in PKCS11_MODULES)
|
|
|
|
|
2019-08-16 13:10:15 -05:00
|
|
|
def enable_ldap_automount(self, statestore):
|
|
|
|
"""
|
|
|
|
Point automount to ldap in nsswitch.conf.
|
|
|
|
This function is for non-SSSD setups only.
|
|
|
|
"""
|
|
|
|
super(RedHatTaskNamespace, self).enable_ldap_automount(statestore)
|
|
|
|
|
|
|
|
authselect_cmd = [paths.AUTHSELECT, "enable-feature",
|
|
|
|
"with-custom-automount"]
|
|
|
|
ipautil.run(authselect_cmd)
|
|
|
|
|
|
|
|
def disable_ldap_automount(self, statestore):
|
|
|
|
"""Disable ldap-based automount"""
|
|
|
|
super(RedHatTaskNamespace, self).disable_ldap_automount(statestore)
|
|
|
|
|
|
|
|
authselect_cmd = [paths.AUTHSELECT, "disable-feature",
|
|
|
|
"with-custom-automount"]
|
|
|
|
ipautil.run(authselect_cmd)
|
|
|
|
|
2014-10-03 04:14:56 -05:00
|
|
|
# Module-level singleton consumed by the rest of IPA as the platform
# task namespace for Red Hat OS family systems.
tasks = RedHatTaskNamespace()
|