2013-08-20 06:38:29 -05:00
|
|
|
# Authors: Rob Crittenden <rcritten@redhat.com>
|
2013-03-13 08:36:41 -05:00
|
|
|
#
|
|
|
|
# Copyright (C) 2013 Red Hat
|
|
|
|
# see file 'COPYING' for use and warranty information
|
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
#
|
|
|
|
|
|
|
|
import os
|
|
|
|
import shutil
|
|
|
|
import tempfile
|
|
|
|
import time
|
|
|
|
import pwd
|
2015-01-12 09:37:33 -06:00
|
|
|
import ldif
|
2015-01-12 06:44:21 -06:00
|
|
|
import itertools
|
2013-03-13 08:36:41 -05:00
|
|
|
|
2016-08-24 06:37:30 -05:00
|
|
|
# pylint: disable=import-error
|
2015-09-14 07:03:58 -05:00
|
|
|
from six.moves.configparser import SafeConfigParser
|
2016-08-24 06:37:30 -05:00
|
|
|
# pylint: enable=import-error
|
2015-09-14 07:03:58 -05:00
|
|
|
|
2016-11-22 23:23:47 -06:00
|
|
|
from ipaclient.install.client import update_ipa_nssdb
|
2016-03-22 14:05:39 -05:00
|
|
|
from ipalib import api, errors
|
|
|
|
from ipalib.constants import FQDN
|
2016-11-22 23:23:47 -06:00
|
|
|
from ipapython import version, ipautil
|
2013-03-13 08:36:41 -05:00
|
|
|
from ipapython.ipautil import run, user_input
|
|
|
|
from ipapython import admintool
|
|
|
|
from ipapython.dn import DN
|
|
|
|
from ipaserver.install.replication import (wait_for_task, ReplicationManager,
|
2013-09-02 03:56:19 -05:00
|
|
|
get_cs_replication_manager)
|
2013-03-13 08:36:41 -05:00
|
|
|
from ipaserver.install import installutils
|
2017-01-20 01:33:22 -06:00
|
|
|
from ipaserver.install import dsinstance, httpinstance, cainstance, krbinstance
|
2013-03-13 08:36:41 -05:00
|
|
|
from ipapython import ipaldap
|
2014-08-07 04:09:38 -05:00
|
|
|
import ipapython.errors
|
2016-01-19 07:18:30 -06:00
|
|
|
from ipaplatform.constants import constants
|
2014-05-29 03:51:08 -05:00
|
|
|
from ipaplatform.tasks import tasks
|
2014-05-29 03:37:18 -05:00
|
|
|
from ipaplatform import services
|
2014-05-29 07:47:17 -05:00
|
|
|
from ipaplatform.paths import paths
|
2013-03-13 08:36:41 -05:00
|
|
|
|
2014-11-10 06:29:58 -06:00
|
|
|
try:
|
|
|
|
from ipaserver.install import adtrustinstance
|
|
|
|
except ImportError:
|
|
|
|
adtrustinstance = None
|
|
|
|
|
2013-03-13 08:36:41 -05:00
|
|
|
def recursive_chown(path, uid, gid):
    '''
    Change ownership and permissions of everything below path.

    Every directory found under path is chowned to (uid, gid) and
    chmodded to 0o750; every file is chowned and chmodded to 0o640.
    Note that path itself is not touched -- callers are expected to
    chown/chmod the top-level directory themselves.

    :param path: root directory to walk
    :param uid: numeric user id to assign
    :param gid: numeric group id to assign
    '''
    for root, dirnames, filenames in os.walk(path):
        # use dirname/filename to avoid shadowing the dir()/file() builtins
        for dirname in dirnames:
            full_path = os.path.join(root, dirname)
            os.chown(full_path, uid, gid)
            os.chmod(full_path, 0o750)
        for filename in filenames:
            full_path = os.path.join(root, filename)
            os.chown(full_path, uid, gid)
            os.chmod(full_path, 0o640)
|
2013-03-13 08:36:41 -05:00
|
|
|
|
|
|
|
|
|
|
|
def decrypt_file(tmpdir, filename, keyring):
    '''
    Decrypt a GPG-encrypted backup file into tmpdir.

    filename must end in '.gpg'.  When keyring is given, its '.pub'
    and '.sec' files are used instead of the default keyring.

    :returns: the path of the decrypted file inside tmpdir
    :raises admintool.ScriptError: for a non-gpg file or when gpg
        exits non-zero
    '''
    source = filename
    (dest, ext) = os.path.splitext(filename)

    if ext != '.gpg':
        raise admintool.ScriptError('Trying to decrypt a non-gpg file')

    # decrypt into the temporary directory, keeping the base name
    dest = os.path.join(tmpdir, os.path.basename(dest))

    args = [paths.GPG, '--batch', '-o', dest]

    if keyring is not None:
        args.extend([
            '--no-default-keyring',
            '--keyring', keyring + '.pub',
            '--secret-keyring', keyring + '.sec',
        ])

    args.extend(['-d', source])

    result = run(args, raiseonerr=False)
    if result.returncode != 0:
        raise admintool.ScriptError('gpg failed: %s' % result.error_log)

    return dest
|
|
|
|
|
|
|
|
|
2015-01-12 09:37:33 -06:00
|
|
|
class RemoveRUVParser(ldif.LDIFParser):
    """LDIF filter that drops RUV tombstone entries while copying.

    Entries whose objectclass contains 'nstombstone' and whose
    nsuniqueid is the special RUV id are skipped (and logged);
    everything else is written unchanged to the supplied LDIFWriter.
    """

    def __init__(self, input_file, writer, logger):
        ldif.LDIFParser.__init__(self, input_file)
        self.writer = writer
        self.log = logger

    def handle(self, dn, entry):
        """Called once per parsed entry; filter or pass through."""
        objectclass = None
        nsuniqueid = None

        for name, value in entry.items():
            attr = name.lower()
            if attr == 'objectclass':
                objectclass = [x.lower() for x in value]
            elif attr == 'nsuniqueid':
                nsuniqueid = [x.lower() for x in value]

        is_ruv_tombstone = (
            objectclass and nsuniqueid and
            'nstombstone' in objectclass and
            'ffffffff-ffffffff-ffffffff-ffffffff' in nsuniqueid
        )
        if is_ruv_tombstone:
            self.log.debug("Removing RUV entry %s", dn)
            return

        self.writer.unparse(dn, entry)
|
|
|
|
|
|
|
|
|
2013-03-13 08:36:41 -05:00
|
|
|
class Restore(admintool.AdminTool):
|
|
|
|
command_name = 'ipa-restore'
|
2014-05-29 07:47:17 -05:00
|
|
|
log_file_name = paths.IPARESTORE_LOG
|
2013-03-13 08:36:41 -05:00
|
|
|
|
|
|
|
usage = "%prog [options] backup"
|
|
|
|
|
|
|
|
description = "Restore IPA files and databases."
|
|
|
|
|
2015-09-10 09:35:54 -05:00
|
|
|
# directories and files listed here will be removed from filesystem before
|
|
|
|
# files from backup are copied
|
|
|
|
DIRS_TO_BE_REMOVED = [
|
|
|
|
paths.DNSSEC_TOKENS_DIR,
|
|
|
|
]
|
|
|
|
|
|
|
|
FILES_TO_BE_REMOVED = []
|
|
|
|
|
2013-03-13 08:36:41 -05:00
|
|
|
def __init__(self, options, args):
    """Initialize the restore tool with parsed options and arguments."""
    super(Restore, self).__init__(options, args)
    # Cached LDAP connection; created lazily by get_connection().
    self._conn = None
|
|
|
|
|
|
|
|
@classmethod
def add_options(cls, parser):
    """Register ipa-restore's command-line options.

    Builds on the common AdminTool options (debug_option=True adds
    the --debug flag from the base class).
    """
    super(Restore, cls).add_options(parser, debug_option=True)

    parser.add_option("-p", "--password", dest="password",
                      help="Directory Manager password")
    parser.add_option("--gpg-keyring", dest="gpg_keyring",
                      help="The gpg key name to be used")
    parser.add_option("--data", dest="data_only", action="store_true",
                      default=False, help="Restore only the data")
    parser.add_option("--online", dest="online", action="store_true",
                      default=False, help="Perform the LDAP restores online, for data only.")
    parser.add_option("--instance", dest="instance",
                      help="The 389-ds instance to restore (defaults to all found)")
    parser.add_option("--backend", dest="backend",
                      help="The backend to restore within the instance or instances")
    parser.add_option('--no-logs', dest="no_logs", action="store_true",
                      default=False, help="Do not restore log files from the backup")
    parser.add_option('-U', '--unattended', dest="unattended",
                      action="store_true", default=False,
                      help="Unattended restoration never prompts the user")
|
|
|
|
|
|
|
|
|
|
|
|
def setup_logging(self, log_file_mode='a'):
    """Set up logging for the restore run.

    :param log_file_mode: mode used to open the log file; defaults to
        append so repeated runs accumulate in one log.
    """
    # Fix: the previous version ignored the log_file_mode argument and
    # always passed the literal 'a' to the parent implementation.
    super(Restore, self).setup_logging(log_file_mode=log_file_mode)
|
|
|
|
|
|
|
|
|
|
|
|
def validate_options(self):
    """Sanity-check command-line arguments before running.

    Verifies that exactly one backup name was given, that it resolves
    to an existing directory (relative names are looked up under
    paths.IPA_BACKUP_DIR), and that a supplied GPG keyring has both
    its .pub and .sec halves on disk.
    """
    parser = self.option_parser
    options = self.options
    super(Restore, self).validate_options(needs_root=True)

    if len(self.args) < 1:
        parser.error("must provide the backup to restore")
    if len(self.args) > 1:
        parser.error("must provide exactly one name for the backup")

    backup_path = self.args[0]
    if not os.path.isabs(backup_path):
        backup_path = os.path.join(paths.IPA_BACKUP_DIR, backup_path)
    if not os.path.isdir(backup_path):
        parser.error("must provide path to backup directory")

    keyring = options.gpg_keyring
    if keyring:
        have_pub = os.path.exists(keyring + '.pub')
        have_sec = os.path.exists(keyring + '.sec')
        if not (have_pub and have_sec):
            parser.error("no such key %s" % keyring)
|
2013-03-13 08:36:41 -05:00
|
|
|
|
|
|
|
|
|
|
|
def ask_for_options(self):
    """Collect the Directory Manager password.

    Uses --password when given; otherwise prompts interactively
    (unless running unattended) and fails if no password is obtained.
    """
    options = self.options
    super(Restore, self).ask_for_options()

    # get the directory manager password
    self.dirman_password = options.password
    if options.password:
        return

    if not options.unattended:
        self.dirman_password = installutils.read_password(
            "Directory Manager (existing master)",
            confirm=False, validate=False)
    if self.dirman_password is None:
        raise admintool.ScriptError(
            "Directory Manager password required")
|
|
|
|
|
|
|
|
|
|
|
|
def run(self):
    """Execute the restore.

    Reads the backup header, validates that the requested restore type
    (FULL or DATA) is compatible with the options and the local
    system's state, extracts the backup into a temporary directory,
    disables replication agreements on the other masters, and then
    restores files and/or LDIF data, restarting the affected services.
    The temporary directory is always removed in the finally block.
    """
    options = self.options
    super(Restore, self).run()

    # Resolve a relative backup name under the standard backup dir.
    self.backup_dir = self.args[0]
    if not os.path.isabs(self.backup_dir):
        self.backup_dir = os.path.join(paths.IPA_BACKUP_DIR, self.backup_dir)

    self.log.info("Preparing restore from %s on %s",
                  self.backup_dir, FQDN)

    self.header = os.path.join(self.backup_dir, 'header')

    try:
        self.read_header()
    except IOError as e:
        raise admintool.ScriptError("Cannot read backup metadata: %s" % e)

    # --data forces a DATA restore even from a FULL backup.
    if options.data_only:
        restore_type = 'DATA'
    else:
        restore_type = self.backup_type

    # These checks would normally be in the validate method but
    # we need to know the type of backup we're dealing with.
    if restore_type == 'FULL':
        if options.online:
            raise admintool.ScriptError(
                "File restoration cannot be done online")
        if options.instance or options.backend:
            raise admintool.ScriptError(
                "Restore must be in data-only mode when restoring a "
                "specific instance or backend")
    else:
        # DATA restore requires an already-configured server.
        installutils.check_server_configuration()

        self.init_api()

        if options.instance:
            instance_dir = (paths.VAR_LIB_SLAPD_INSTANCE_DIR_TEMPLATE %
                            options.instance)
            if not os.path.exists(instance_dir):
                raise admintool.ScriptError(
                    "Instance %s does not exist" % options.instance)

            self.instances = [options.instance]

        if options.backend:
            # for/else: the requested backend must exist in at least
            # one instance on disk.
            for instance in self.instances:
                db_dir = (paths.SLAPD_INSTANCE_DB_DIR_TEMPLATE %
                          (instance, options.backend))
                if os.path.exists(db_dir):
                    break
            else:
                raise admintool.ScriptError(
                    "Backend %s does not exist" % options.backend)

            self.backends = [options.backend]

        # At least one (instance, backend) pair must exist locally,
        # otherwise there is nothing to restore data into.
        for instance, backend in itertools.product(self.instances,
                                                   self.backends):
            db_dir = (paths.SLAPD_INSTANCE_DB_DIR_TEMPLATE %
                      (instance, backend))
            if os.path.exists(db_dir):
                break
        else:
            raise admintool.ScriptError(
                "Cannot restore a data backup into an empty system")

    self.log.info("Performing %s restore from %s backup" %
                  (restore_type, self.backup_type))

    # A backup can only be restored on the host it was taken on.
    if self.backup_host != FQDN:
        raise admintool.ScriptError(
            "Host name %s does not match backup name %s" %
            (FQDN, self.backup_host))

    if self.backup_ipa_version != str(version.VERSION):
        self.log.warning(
            "Restoring data from a different release of IPA.\n"
            "Data is version %s.\n"
            "Server is running %s." %
            (self.backup_ipa_version, str(version.VERSION)))
        if (not options.unattended and
                not user_input("Continue to restore?", False)):
            raise admintool.ScriptError("Aborted")

    pent = pwd.getpwnam(constants.DS_USER)

    # Temporary directory for decrypting files before restoring;
    # owned by the DS user so 389-ds tasks can read from it.
    self.top_dir = tempfile.mkdtemp("ipa")
    os.chown(self.top_dir, pent.pw_uid, pent.pw_gid)
    os.chmod(self.top_dir, 0o750)
    self.dir = os.path.join(self.top_dir, "ipa")
    os.mkdir(self.dir)
    os.chmod(self.dir, 0o750)
    os.chown(self.dir, pent.pw_uid, pent.pw_gid)

    cwd = os.getcwd()
    try:
        dirsrv = services.knownservices.dirsrv

        self.extract_backup(options.gpg_keyring)

        if restore_type == 'FULL':
            # Bring up the api from the backed-up default.conf since
            # the live configuration may be gone (post-uninstall).
            self.restore_default_conf()
            self.init_api(confdir=self.dir + paths.ETC_IPA)

        # Collect the (instance, backend) pairs actually present in
        # the extracted backup.
        databases = []
        for instance in self.instances:
            for backend in self.backends:
                database = (instance, backend)
                ldiffile = os.path.join(self.dir, '%s-%s.ldif' % database)
                if os.path.exists(ldiffile):
                    databases.append(database)

        if options.instance:
            for instance, backend in databases:
                if instance == options.instance:
                    break
            else:
                raise admintool.ScriptError(
                    "Instance %s not found in backup" % options.instance)

        if options.backend:
            for instance, backend in databases:
                if backend == options.backend:
                    break
            else:
                raise admintool.ScriptError(
                    "Backend %s not found in backup" % options.backend)

        # Big fat warning
        if (not options.unattended and
                not user_input("Restoring data will overwrite existing live data. Continue to restore?", False)):
            raise admintool.ScriptError("Aborted")

        self.log.info(
            "Each master will individually need to be re-initialized or")
        self.log.info(
            "re-created from this one. The replication agreements on")
        self.log.info(
            "masters running IPA 3.1 or earlier will need to be manually")
        self.log.info(
            "re-enabled. See the man page for details.")

        self.log.info("Disabling all replication.")
        self.disable_agreements()

        if restore_type != 'FULL':
            # DATA restore: offline needs dirsrv stopped, online needs
            # it running so the import task can be submitted.
            if not options.online:
                self.log.info('Stopping Directory Server')
                dirsrv.stop(capture_output=False)
            else:
                self.log.info('Starting Directory Server')
                dirsrv.start(capture_output=False)
        else:
            # FULL restore: everything goes down before files are
            # overwritten.  ipactl exit code 6 is tolerated here.
            self.log.info('Stopping IPA services')
            result = run(['ipactl', 'stop'], raiseonerr=False)
            if result.returncode not in [0, 6]:
                self.log.warning('Stopping IPA failed: %s' % result.error_log)

            self.restore_selinux_booleans()

        http = httpinstance.HTTPInstance()

        # We do either a full file restore or we restore data.
        if restore_type == 'FULL':
            self.remove_old_files()
            self.cert_restore_prepare()
            self.file_restore(options.no_logs)
            self.cert_restore()
            if 'CA' in self.backup_services:
                self.__create_dogtag_log_dirs()

        # Always restore the data from ldif
        # We need to restore both userRoot and ipaca.
        for instance, backend in databases:
            self.ldif2db(instance, backend, online=options.online)

        if restore_type != 'FULL':
            if not options.online:
                self.log.info('Starting Directory Server')
                dirsrv.start(capture_output=False)
        else:
            # restore access control configuration
            auth_backup_path = os.path.join(paths.VAR_LIB_IPA, 'auth_backup')
            if os.path.exists(auth_backup_path):
                tasks.restore_auth_configuration(auth_backup_path)
            # explicitly enable then disable the pki tomcatd service to
            # re-register its instance. FIXME, this is really weird.
            services.knownservices.pki_tomcatd.enable()
            services.knownservices.pki_tomcatd.disable()

            self.log.info('Restarting GSS-proxy')
            gssproxy = services.service('gssproxy', api)
            gssproxy.reload_or_restart()
            self.log.info('Starting IPA services')
            run(['ipactl', 'start'])
            self.log.info('Restarting SSSD')
            sssd = services.service('sssd', api)
            sssd.restart()
            http.remove_httpd_ccaches()
            # have the daemons pick up their restored configs
            run([paths.SYSTEMCTL, "--system", "daemon-reload"])
    finally:
        # Always return to the original cwd and clean up the
        # temporary extraction directory.
        try:
            os.chdir(cwd)
        except Exception as e:
            self.log.error('Cannot change directory to %s: %s' % (cwd, e))
        shutil.rmtree(self.top_dir)
|
|
|
|
|
|
|
|
|
|
|
|
def get_connection(self):
    '''
    Create an ldapi connection and bind to it using autobind as root.

    The connection is cached on the instance; subsequent calls return
    the same object.
    '''
    serverid = installutils.realm_to_serverid(api.env.realm)

    if not services.knownservices.dirsrv.is_running(serverid):
        raise admintool.ScriptError(
            "directory server instance is not running/configured"
        )

    if self._conn is None:
        uri = ipaldap.get_ldap_uri(protocol='ldapi', realm=api.env.realm)
        # Cache before binding (matches previous behavior).
        self._conn = ipaldap.LDAPClient(uri)
        try:
            self._conn.external_bind()
        except Exception as e:
            raise admintool.ScriptError('Unable to bind to LDAP server: %s'
                                        % e)
    return self._conn
|
|
|
|
|
|
|
|
|
|
|
|
def disable_agreements(self):
    '''
    Find all replication agreements on all masters and disable them.

    Warn very loudly about any agreements/masters we cannot contact.

    This is best-effort: failures on individual masters are logged
    and skipped so the restore can proceed.
    '''
    try:
        conn = self.get_connection()
    except Exception as e:
        self.log.error('Unable to get connection, skipping disabling agreements: %s' % e)
        return
    masters = []
    # Enumerate all masters from cn=masters,cn=ipa,cn=etc.
    dn = DN(('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
    try:
        entries = conn.get_entries(dn, conn.SCOPE_ONELEVEL)
    except Exception as e:
        raise admintool.ScriptError(
            "Failed to read master data: %s" % e)
    else:
        masters = [ent.single_value['cn'] for ent in entries]

    for master in masters:
        # The local host is being restored; only remote agreements
        # need disabling.
        if master == api.env.host:
            continue

        try:
            repl = ReplicationManager(api.env.realm, master,
                                      self.dirman_password)
        except Exception as e:
            self.log.critical("Unable to disable agreement on %s: %s" % (master, e))
            continue

        master_dn = DN(('cn', master), ('cn', 'masters'), ('cn', 'ipa'), ('cn', 'etc'), api.env.basedn)
        try:
            # NOTE(review): this local variable shadows the
            # module-level `services` import for the rest of the loop
            # body -- it holds the master's service entries.
            services = repl.conn.get_entries(master_dn,
                                             repl.conn.SCOPE_ONELEVEL)
        except errors.NotFound:
            continue

        services_cns = [s.single_value['cn'] for s in services]

        host_entries = repl.find_ipa_replication_agreements()
        hosts = [rep.single_value.get('nsds5replicahost')
                 for rep in host_entries]

        for host in hosts:
            self.log.info('Disabling replication agreement on %s to %s' % (master, host))
            repl.disable_agreement(host)

        # If this master also runs a CA, disable its CS (dogtag)
        # replication agreements as well.
        if 'CA' in services_cns:
            try:
                repl = get_cs_replication_manager(api.env.realm, master,
                                                  self.dirman_password)
            except Exception as e:
                self.log.critical("Unable to disable agreement on %s: %s" % (master, e))
                continue

            host_entries = repl.find_ipa_replication_agreements()
            hosts = [rep.single_value.get('nsds5replicahost')
                     for rep in host_entries]
            for host in hosts:
                self.log.info('Disabling CA replication agreement on %s to %s' % (master, host))
                repl.hostnames = [master, host]
                repl.disable_agreement(host)
|
|
|
|
|
|
|
|
|
|
|
|
def ldif2db(self, instance, backend, online=True):
    '''
    Restore a LDIF backup of the data in this instance.

    If executed online create a task and wait for it to complete.

    :param instance: 389-ds instance name
    :param backend: backend (database) name within the instance
    :param online: submit an import task over LDAP when True,
        otherwise run the offline ldif2db command
    '''
    self.log.info('Restoring from %s in %s' % (backend, instance))

    # Unique task name/DN for the online import task.
    cn = time.strftime('import_%Y_%m_%d_%H_%M_%S')
    dn = DN(('cn', cn), ('cn', 'import'), ('cn', 'tasks'), ('cn', 'config'))

    ldifdir = paths.SLAPD_INSTANCE_LDIF_DIR_TEMPLATE % instance
    ldifname = '%s-%s.ldif' % (instance, backend)
    ldiffile = os.path.join(ldifdir, ldifname)
    srcldiffile = os.path.join(self.dir, ldifname)

    # Make sure the instance's ldif directory exists and is owned by
    # the DS user so the server can read the file.
    if not os.path.exists(ldifdir):
        pent = pwd.getpwnam(constants.DS_USER)
        os.mkdir(ldifdir)
        os.chmod(ldifdir, 0o770)
        os.chown(ldifdir, pent.pw_uid, pent.pw_gid)

    ipautil.backup_file(ldiffile)
    # Copy the backup LDIF into place, stripping RUV tombstone
    # entries so stale replica update vectors are not re-imported.
    with open(ldiffile, 'wb') as out_file:
        ldif_writer = ldif.LDIFWriter(out_file)
        with open(srcldiffile, 'rb') as in_file:
            ldif_parser = RemoveRUVParser(in_file, ldif_writer, self.log)
            ldif_parser.parse()

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsFilename': [ldiffile],
                'nsUseOneFile': ['true'],
            }
        )
        ent['nsInstance'] = [backend]

        try:
            conn.add_entry(ent)
        except Exception as e:
            self.log.error("Unable to bind to LDAP server: %s" % e)
            return

        self.log.info("Waiting for LDIF to finish")
        wait_for_task(conn, dn)
    else:
        try:
            os.makedirs(paths.VAR_LOG_DIRSRV_INSTANCE_TEMPLATE % instance)
        except OSError as e:
            # NOTE(review): this swallows every OSError, not just
            # "already exists" -- presumably intentional best-effort,
            # but worth confirming.
            pass

        args = [paths.LDIF2DB,
                '-Z', instance,
                '-i', ldiffile,
                '-n', backend]
        result = run(args, raiseonerr=False)
        if result.returncode != 0:
            self.log.critical("ldif2db failed: %s" % result.error_log)
|
2013-03-13 08:36:41 -05:00
|
|
|
|
|
|
|
|
|
|
|
def bak2db(self, instance, backend, online=True):
    '''
    Restore a BAK backup of the data and changelog in this instance.

    If backend is None then all backends are restored.

    If executed online create a task and wait for it to complete.

    instance here is a loaded term. It can mean either a separate
    389-ds install instance or a separate 389-ds backend. We only need
    to treat ipaca specially.
    '''
    if backend is not None:
        self.log.info('Restoring %s in %s' % (backend, instance))
    else:
        self.log.info('Restoring %s' % instance)

    # Unique task name/DN for the online restore task.
    cn = time.strftime('restore_%Y_%m_%d_%H_%M_%S')

    dn = DN(('cn', cn), ('cn', 'restore'), ('cn', 'tasks'), ('cn', 'config'))

    if online:
        conn = self.get_connection()
        ent = conn.make_entry(
            dn,
            {
                'objectClass': ['top', 'extensibleObject'],
                'cn': [cn],
                'nsArchiveDir': [os.path.join(self.dir, instance)],
                'nsDatabaseType': ['ldbm database'],
            }
        )
        # Restrict the task to one backend when requested; otherwise
        # the task restores all backends in the archive.
        if backend is not None:
            ent['nsInstance'] = [backend]

        try:
            conn.add_entry(ent)
        except Exception as e:
            raise admintool.ScriptError('Unable to bind to LDAP server: %s'
                                        % e)

        self.log.info("Waiting for restore to finish")
        wait_for_task(conn, dn)
    else:
        # Offline path: run bak2db directly against the archive dir.
        args = [paths.BAK2DB,
                '-Z', instance,
                os.path.join(self.dir, instance)]
        if backend is not None:
            args.append('-n')
            args.append(backend)
        result = run(args, raiseonerr=False)
        if result.returncode != 0:
            self.log.critical("bak2db failed: %s" % result.error_log)
|
2013-03-13 08:36:41 -05:00
|
|
|
|
|
|
|
|
2015-03-04 09:06:47 -06:00
|
|
|
def restore_default_conf(self):
    '''
    Restore paths.IPA_DEFAULT_CONF to the temporary directory.

    Primary purpose of this method is to get configuration for api
    finalization when restoring ipa after uninstall.

    Extracts only the default.conf member from files.tar into
    self.dir (the tar path is relative, hence the [1:] strip of the
    leading '/').
    '''
    cwd = os.getcwd()
    os.chdir(self.dir)
    args = ['tar',
            '--xattrs',
            '--selinux',
            '-xzf',
            os.path.join(self.dir, 'files.tar'),
            paths.IPA_DEFAULT_CONF[1:],
            ]

    try:
        result = run(args, raiseonerr=False)
        if result.returncode != 0:
            self.log.critical('Restoring %s failed: %s' %
                              (paths.IPA_DEFAULT_CONF, result.error_log))
    finally:
        # Fix: always return to the original working directory, even
        # if run() raises, so later steps don't execute inside the
        # temporary directory.
        os.chdir(cwd)
|
|
|
|
|
2015-09-10 09:35:54 -05:00
|
|
|
def remove_old_files(self):
    """
    Removes all directories, files or temporal files that should be
    removed before backup files are copied, to prevent errors.

    Missing paths are silently ignored; any other removal failure is
    logged as a warning and the restore continues.
    """
    import errno  # local import; keeps the module header untouched

    for d in self.DIRS_TO_BE_REMOVED:
        try:
            shutil.rmtree(d)
        except OSError as e:
            # ENOENT: directory does not exist -- nothing to remove
            if e.errno != errno.ENOENT:
                self.log.warning("Could not remove directory: %s (%s)",
                                 d, e)

    for f in self.FILES_TO_BE_REMOVED:
        try:
            os.remove(f)
        except OSError as e:
            # ENOENT: file does not exist -- nothing to remove
            if e.errno != errno.ENOENT:
                self.log.warning("Could not remove file: %s (%s)", f, e)
|
2015-03-04 09:06:47 -06:00
|
|
|
|
2013-03-13 08:36:41 -05:00
|
|
|
def file_restore(self, nologs=False):
    '''
    Restore all the files in the tarball.

    This MUST be done offline because we directly backup the 389-ds
    databases.

    :param nologs: when True, skip restoring anything under var/log
    '''
    self.log.info("Restoring files")
    cwd = os.getcwd()
    # files.tar stores paths relative to /, so extract from there
    os.chdir('/')
    args = ['tar',
            '--xattrs',
            '--selinux',
            '-xzf',
            os.path.join(self.dir, 'files.tar')
            ]
    if nologs:
        args.append('--exclude')
        args.append('var/log')

    try:
        result = run(args, raiseonerr=False)
        if result.returncode != 0:
            self.log.critical('Restoring files failed: %s', result.error_log)
    finally:
        # Fix: restore the working directory even if run() raises so
        # the caller's cleanup (which removes the temp dir) still works
        # from the expected cwd.
        os.chdir(cwd)
|
|
|
|
|
|
|
|
|
|
|
|
def read_header(self):
    '''
    Read the backup file header that contains the meta data about
    this particular backup.

    Populates the backup_* attributes from the [ipa] section of the
    header file written by ipa-backup.
    '''
    config = SafeConfigParser()
    with open(self.header) as fd:
        config.readfp(fd)

    section = 'ipa'
    self.backup_type = config.get(section, 'type')
    self.backup_time = config.get(section, 'time')
    self.backup_host = config.get(section, 'host')
    self.backup_ipa_version = config.get(section, 'ipa_version')
    self.backup_version = config.get(section, 'version')
    self.backup_services = config.get(section, 'services').split(',')
|
2013-03-13 08:36:41 -05:00
|
|
|
|
|
|
|
|
|
|
|
def extract_backup(self, keyring=None):
    '''
    Extract the contents of the tarball backup into a temporary location,
    decrypting if necessary.
    '''
    # Pick the tarball name based on the backup type recorded in the
    # header.
    if self.backup_type == 'FULL':
        filename = os.path.join(self.backup_dir, 'ipa-full.tar')
    else:
        filename = os.path.join(self.backup_dir, 'ipa-data.tar')

    encrypt = False
    if not os.path.exists(filename):
        # Fall back to the GPG-encrypted variant of the tarball.
        if not os.path.exists(filename + '.gpg'):
            raise admintool.ScriptError('Unable to find backup file in %s' % self.backup_dir)
        filename = filename + '.gpg'
        encrypt = True

    if encrypt:
        self.log.info('Decrypting %s' % filename)
        filename = decrypt_file(self.dir, filename, keyring)

    os.chdir(self.dir)

    run(['tar',
         '--xattrs',
         '--selinux',
         '-xzf',
         filename,
         '.'
         ])

    # The extracted tree must be owned by the DS user so that the
    # 389-ds restore can read it.
    pent = pwd.getpwnam(constants.DS_USER)
    os.chown(self.top_dir, pent.pw_uid, pent.pw_gid)
    recursive_chown(self.dir, pent.pw_uid, pent.pw_gid)

    if encrypt:
        # The decrypted copy of the tarball is no longer needed.
        os.unlink(filename)
|
|
|
|
|
|
|
|
def __create_dogtag_log_dirs(self):
    """
    If we are doing a full restore and the dogtag log directories do
    not exist then tomcat will fail to start.

    The directory is different depending on whether we have a d9-based
    or a d10-based installation.
    """
    dirs = []
    # dogtag 10
    if (os.path.exists(paths.VAR_LIB_PKI_TOMCAT_DIR) and
            not os.path.exists(paths.TOMCAT_TOPLEVEL_DIR)):
        dirs += [paths.TOMCAT_TOPLEVEL_DIR,
                 paths.TOMCAT_CA_DIR,
                 paths.TOMCAT_CA_ARCHIVE_DIR,
                 paths.TOMCAT_SIGNEDAUDIT_DIR]

    try:
        pent = pwd.getpwnam(constants.PKI_USER)
    except KeyError:
        # No CA is configured on this host, nothing to create.
        self.log.debug("No %s user exists, skipping CA directory creation",
                       constants.PKI_USER)
        return
    self.log.debug('Creating log directories for dogtag')
    # Renamed the loop variable from ``dir`` to avoid shadowing the
    # ``dir`` builtin.
    for log_dir in dirs:
        try:
            self.log.debug('Creating %s' % log_dir)
            os.mkdir(log_dir)
            os.chmod(log_dir, 0o770)
            os.chown(log_dir, pent.pw_uid, pent.pw_gid)
            tasks.restore_context(log_dir)
        except Exception as e:
            # This isn't so fatal as to side-track the restore
            self.log.error('Problem with %s: %s' % (log_dir, e))
|
2014-08-07 04:09:38 -05:00
|
|
|
|
|
|
|
def restore_selinux_booleans(self):
    """
    Re-apply the SELinux booleans required by the restored services.

    Failures are logged rather than raised: booleans are not critical
    enough to abort the restore.
    """
    bools = dict(httpinstance.SELINUX_BOOLEAN_SETTINGS)
    if 'ADTRUST' in self.backup_services:
        # adtrustinstance may be None when the AD trust packages are
        # not installed on this host.
        if not adtrustinstance:
            self.log.error(
                'The AD trust package was not found, '
                'not setting SELinux booleans.')
        else:
            bools.update(adtrustinstance.SELINUX_BOOLEAN_SETTINGS)
    try:
        tasks.set_selinux_booleans(bools)
    except ipapython.errors.SetseboolError as e:
        self.log.error('%s', e)
|
2014-11-10 10:24:22 -06:00
|
|
|
|
|
|
|
def cert_restore_prepare(self):
    """
    Prepare certificate state for restore: stop certmonger tracking,
    preserve the current IPA NSS database files and drop IPA CA
    certificates from the systemwide store.
    """
    # Stop tracking the certificates that the restore will replace.
    cainstance.CAInstance().stop_tracking_certificates()
    httpinstance.HTTPInstance().stop_tracking_certificates()
    try:
        dsinstance.DsInstance().stop_tracking_certificates(
            installutils.realm_to_serverid(api.env.realm))
    except OSError:
        # When IPA is not installed, DS NSS DB does not exist
        pass

    krbinstance.KrbInstance().stop_tracking_certs()

    # Keep a copy of the current NSS database files; the restore will
    # overwrite them with the versions from the backup.
    for dbfile in ('cert8.db', 'key3.db', 'secmod.db', 'pwdfile.txt'):
        path = os.path.join(paths.IPA_NSSDB_DIR, dbfile)
        try:
            ipautil.backup_file(path)
        except OSError as e:
            self.log.error("Failed to backup %s: %s" % (path, e))

    tasks.remove_ca_certs_from_systemwide_ca_store()
|
|
|
|
|
|
|
|
def cert_restore(self):
    """
    Finish the certificate part of the restore: upgrade the IPA NSS
    database format if needed, reload the systemwide CA store and
    restart certmonger so it picks up the restored tracking data.
    """
    try:
        update_ipa_nssdb()
    except RuntimeError as e:
        # Non-fatal: log the NSS DB upgrade failure and continue.
        self.log.error("%s", e)
    tasks.reload_systemwide_ca_store()
    services.knownservices.certmonger.restart()
|
2015-03-04 09:06:47 -06:00
|
|
|
|
|
|
|
def init_api(self, **overrides):
    """
    Bootstrap and finalize the server-side API for the restore context.

    :param overrides: extra api.bootstrap() overrides; ``confdir``
        defaults to the standard IPA configuration directory.
    """
    overrides.setdefault('confdir', paths.ETC_IPA)
    api.bootstrap(in_server=True, context='restore', **overrides)
    api.finalize()

    # 389-ds instance name derived from the realm, and the backends
    # this restore operates on.
    self.instances = [installutils.realm_to_serverid(api.env.realm)]
    self.backends = ['userRoot', 'ipaca']
|