# Authors: Rob Crittenden <rcritten@redhat.com>
#
# Copyright (C) 2008 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

# Documentation can be found at http://freeipa.org/page/LdapUpdate

# TODO
# save undo files?

UPDATES_DIR = "/usr/share/ipa/updates/"

import sys

from ipaserver.install import installutils
from ipaserver.install import service
from ipaserver.install.plugins import PRE_UPDATE, POST_UPDATE
from ipaserver.install.plugins import FIRST, MIDDLE, LAST
from ipaserver import ipaldap
from ipapython import entity, ipautil
from ipapython.ipa_log_manager import *
from ipalib import util
from ipalib import errors
from ipalib import api
import ldap
import ldap.schema  # needed explicitly for ldap.schema.SubSchema below
from ldap.dn import escape_dn_chars
import krbV
import platform
import time
import random
import os
import pwd
import fnmatch
import csv

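# Typical driver flow (illustrative sketch -- ipa-ldap-updater is the usual
# entry point and wraps this class with option handling):
#   ld = LDAPUpdate(dm_password, live_run=True)
#   files = ld.get_all_files(UPDATES_DIR)
#   modified = ld.update(files)

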
class BadSyntax(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class LDAPUpdate:
    def __init__(self, dm_password, sub_dict=None, live_run=True,
                 online=True, ldapi=False, plugins=False):
        """dm_password = Directory Manager password
           sub_dict = substitution dictionary
           live_run = Apply the changes or just test
           online = do an online LDAP update or use an experimental LDIF updater
           ldapi = bind using ldapi. This assumes autobind is enabled.
           plugins = execute the pre/post update plugins
        """
        if sub_dict is None:
            # avoid mutating a shared default dict across instances
            sub_dict = {}
        self.sub_dict = sub_dict
        self.live_run = live_run
        self.dm_password = dm_password
        self.conn = None
        self.modified = False
        self.online = online
        self.ldapi = ldapi
        self.plugins = plugins
        self.pw_name = pwd.getpwuid(os.geteuid()).pw_name

        suffix = None
        if sub_dict.get("REALM"):
            self.realm = sub_dict["REALM"]
        else:
            krbctx = krbV.default_context()
            try:
                self.realm = krbctx.default_realm
                suffix = util.realm_to_suffix(self.realm)
            except krbV.Krb5Error:
                self.realm = None

        domain = ipautil.get_domain_name()
        libarch = self.__identify_arch()

        if not self.ldapi:
            fqdn = installutils.get_fqdn()
            if fqdn is None:
                raise RuntimeError("Unable to determine hostname")
        else:
            fqdn = "ldapi://%%2fvar%%2frun%%2fslapd-%s.socket" % "-".join(
                self.realm.split(".")
            )

        if not self.sub_dict.get("REALM") and self.realm is not None:
            self.sub_dict["REALM"] = self.realm
        if not self.sub_dict.get("FQDN"):
            self.sub_dict["FQDN"] = fqdn
        if not self.sub_dict.get("DOMAIN") and domain is not None:
            self.sub_dict["DOMAIN"] = domain
        if not self.sub_dict.get("SUFFIX") and suffix is not None:
            self.sub_dict["SUFFIX"] = suffix
        if not self.sub_dict.get("ESCAPED_SUFFIX") and suffix is not None:
            self.sub_dict["ESCAPED_SUFFIX"] = escape_dn_chars(suffix)
        if not self.sub_dict.get("LIBARCH"):
            self.sub_dict["LIBARCH"] = libarch
        if not self.sub_dict.get("TIME"):
            self.sub_dict["TIME"] = int(time.time())

        if online:
            # Try out the connection/password
            try:
                conn = ipaldap.IPAdmin(fqdn, ldapi=self.ldapi, realm=self.realm)
                if self.dm_password:
                    conn.do_simple_bind(binddn="cn=directory manager", bindpw=self.dm_password)
                elif os.getegid() == 0:
                    try:
                        # autobind
                        conn.do_external_bind(self.pw_name)
                    except errors.NotFound:
                        # Fall back
                        conn.do_sasl_gssapi_bind()
                else:
                    conn.do_sasl_gssapi_bind()
                conn.unbind()
            except (ldap.CONNECT_ERROR, ldap.SERVER_DOWN):
                raise RuntimeError("Unable to connect to LDAP server %s" % fqdn)
            except ldap.INVALID_CREDENTIALS:
                raise RuntimeError("The password provided is incorrect for LDAP server %s" % fqdn)
            except ldap.LOCAL_ERROR, e:
                raise RuntimeError('%s' % e.args[0].get('info', '').strip())
        else:
            raise RuntimeError("Offline updates are not supported.")

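    # Illustrative example of substitution (values invented): with
    # sub_dict["SUFFIX"] = "dc=example,dc=com", _template_str() below turns
    # the update-file line
    #   dn: cn=config,$SUFFIX
    # into
    #   dn: cn=config,dc=example,dc=com
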
    # The following 2 functions were taken from the Python
    # documentation at http://docs.python.org/library/csv.html
    def __utf_8_encoder(self, unicode_csv_data):
        for line in unicode_csv_data:
            yield line.encode('utf-8')

    def __unicode_csv_reader(self, unicode_csv_data, quote_char="'", dialect=csv.excel, **kwargs):
        # csv.py doesn't do Unicode; encode temporarily as UTF-8:
        csv_reader = csv.reader(self.__utf_8_encoder(unicode_csv_data),
                                dialect=dialect, delimiter=',',
                                quotechar=quote_char,
                                skipinitialspace=True,
                                **kwargs)
        for row in csv_reader:
            # decode UTF-8 back to Unicode, cell by cell:
            yield [unicode(cell, 'utf-8') for cell in row]

    def __identify_arch(self):
        """On multi-arch systems some libraries may be in /lib64, /usr/lib64,
           etc. Determine if a suffix is needed based on the current
           architecture.
        """
        bits = platform.architecture()[0]

        if bits == "64bit":
            return "64"
        else:
            return ""

    def _template_str(self, s):
        try:
            return ipautil.template_str(s, self.sub_dict)
        except KeyError, e:
            raise BadSyntax("Unknown template keyword %s" % e)

    def __parse_values(self, line):
        """Parse a comma-separated string into separate values and convert them
           into a list. This should handle quoted-strings with embedded commas
        """
        if line[0] == "'":
            quote_char = "'"
        else:
            quote_char = '"'
        reader = self.__unicode_csv_reader([line], quote_char)
        value = []
        for row in reader:
            value = value + row
        return value

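    # Illustrative example (values invented): the line
    #   'first, with comma', second
    # starts with a single quote, so quote_char is "'" and the result is
    #   [u'first, with comma', u'second']
    # with skipinitialspace dropping the blank after the delimiter.
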
    def read_file(self, filename):
        if filename == '-':
            fd = sys.stdin
        else:
            fd = open(filename)
        text = fd.readlines()
        if fd != sys.stdin: fd.close()
        return text

    def __entry_to_entity(self, ent):
        """The Entry class is a bare LDAP entry. The Entity class has a lot more
           helper functions that we need, so convert to dict and then to Entity.
        """
        entry = dict(ent.data)
        entry['dn'] = ent.dn
        for key, value in entry.iteritems():
            if isinstance(value, (list, tuple)):
                if len(value) == 0:
                    entry[key] = ''
                elif len(value) == 1:
                    entry[key] = value[0]
        return entity.Entity(entry)

    def __combine_updates(self, dn_list, all_updates, update):
        """Combine a new update with the list of total updates

           Updates are stored in 2 lists:
               dn_list: contains a unique list of DNs in the updates
               all_updates: the actual updates that need to be applied

           We want to apply the updates from the shortest to the longest
           path so if new child and parent entries are in different updates
           we can be sure the parent gets written first. This also lets
           us apply any schema first since it is in the very short cn=schema.
        """
        dn = update.get('dn')
        dns = ldap.explode_dn(dn.lower())
        l = len(dns)
        if dn_list.get(l):
            if dn not in dn_list[l]:
                dn_list[l].append(dn)
        else:
            dn_list[l] = [dn]
        if not all_updates.get(dn):
            all_updates[dn] = update
            return all_updates

        e = all_updates[dn]
        if 'default' in update:
            if 'default' in e:
                e['default'] = e['default'] + update['default']
            else:
                e['default'] = update['default']
        elif 'updates' in update:
            if 'updates' in e:
                e['updates'] = e['updates'] + update['updates']
            else:
                e['updates'] = update['updates']
        else:
            root_logger.debug("Unknown key in updates %s" % update.keys())

        all_updates[dn] = e

        return all_updates

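    # Illustrative example (DNs invented): dn_list is keyed by RDN count, so
    #   {1: ['cn=schema'], 4: ['cn=sudo,cn=etc,dc=example,dc=com']}
    # guarantees short DNs such as cn=schema are applied before deeper ones.
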
    def parse_update_file(self, data, all_updates, dn_list):
        """Parse the update file into a dictionary of lists and apply the update
           for each DN in the file."""
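        # An update file is line-oriented (illustrative example, values
        # invented; see http://freeipa.org/page/LdapUpdate for the format):
        #   dn: cn=config,$SUFFIX
        #   default:objectClass: top
        #   add:aci: '(targetattr = "*")(version 3.0; ... ;)'
        #   replace:nsslapd-minssf:0::1
        # "default" values are only used when the entry has to be created;
        # every other keyword is folded into the "updates" list below.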
        valid_keywords = ["default", "add", "remove", "only", "deleteentry", "replace", "addifnew", "addifexist"]
        update = {}
        d = ""
        index = ""
        dn = None
        lcount = 0
        for line in data:
            # Strip out \n and extra white space
            lcount = lcount + 1

            # skip comments and empty lines
            line = line.rstrip()
            if line.startswith('#') or line == '': continue

            if line.lower().startswith('dn:'):
                if dn is not None:
                    all_updates = self.__combine_updates(dn_list, all_updates, update)

                update = {}
                dn = line[3:].strip()
                update['dn'] = self._template_str(dn)
            else:
                if dn is None:
                    raise BadSyntax, "dn is not defined in the update"

                line = self._template_str(line)
                # A line starting with a space continues the previous value
                if line.startswith(' '):
                    v = d[len(d) - 1]
                    v = v + line[1:]
                    d[len(d) - 1] = v
                    update[index] = d
                    continue
                line = line.strip()
                values = line.split(':', 2)
                if len(values) != 3:
                    raise BadSyntax, "Bad formatting on line %d: %s" % (lcount, line)

                index = values[0].strip().lower()

                if index not in valid_keywords:
                    raise BadSyntax, "Unknown keyword %s" % index

                attr = values[1].strip()
                value = values[2].strip()

                new_value = ""
                if index == "default":
                    new_value = attr + ":" + value
                else:
                    new_value = index + ":" + attr + ":" + value
                    index = "updates"

                d = update.get(index, [])

                d.append(new_value)

                update[index] = d

        if dn is not None:
            all_updates = self.__combine_updates(dn_list, all_updates, update)

        return (all_updates, dn_list)

    def create_index_task(self, attribute):
        """Create a task to update an index for an attribute"""

        # Sleep a bit to ensure previous operations are complete
        if self.live_run:
            time.sleep(5)

        r = random.SystemRandom()

        # Refresh the time to make uniqueness more probable. Add on some
        # randomness for good measure.
        self.sub_dict['TIME'] = int(time.time()) + r.randint(0, 10000)

        cn = self._template_str("indextask_$TIME")
        dn = "cn=%s, cn=index, cn=tasks, cn=config" % cn

        e = ipaldap.Entry(dn)

        e.setValues('objectClass', ['top', 'extensibleObject'])
        e.setValue('cn', cn)
        e.setValue('nsInstance', 'userRoot')
        e.setValues('nsIndexAttribute', attribute)

        root_logger.info("Creating task to index attribute: %s", attribute)
        root_logger.debug("Task id: %s", dn)

        if self.live_run:
            self.conn.addEntry(e)

        return dn

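    # Illustrative example (timestamp invented): with TIME rendered as
    # 1323456789 the task entry is written at
    #   cn=indextask_1323456789, cn=index, cn=tasks, cn=config
    # and monitor_index_task() below polls that DN until the directory
    # server marks it finished.
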
    def monitor_index_task(self, dn):
        """Given a task DN, monitor it and wait until it has completed
           (or failed).
        """

        if not self.live_run:
            # If not doing this live there is nothing to monitor
            return

        # Pause for a moment to give the task time to be created
        time.sleep(1)

        attrlist = ['nstaskstatus', 'nstaskexitcode']
        entry = None

        while True:
            try:
                entry = self.conn.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)", attrlist)
            except errors.NotFound, e:
                root_logger.error("Task not found: %s", dn)
                return
            except errors.DatabaseError, e:
                root_logger.error("Task lookup failure %s", e)
                return

            status = entry.getValue('nstaskstatus')
            if status is None:
                # task doesn't have a status yet
                time.sleep(1)
                continue

            if status.lower().find("finished") > -1:
                root_logger.info("Indexing finished")
                break

            root_logger.debug("Indexing in progress")
            time.sleep(1)

        return

    def __create_default_entry(self, dn, default):
        """Create the default entry from the values provided.

           The return type is entity.Entity
        """
        entry = ipaldap.Entry(dn)

        if not default:
            # This means that the entire entry needs to be created with add
            return self.__entry_to_entity(entry)

        for line in default:
            # We already do syntax-parsing so this is safe
            (k, v) = line.split(':', 1)
            e = entry.getValues(k)
            if e:
                # multi-valued attribute
                e = list(e)
                e.append(v)
            else:
                e = v
            entry.setValues(k, e)

        return self.__entry_to_entity(entry)

    def __get_entry(self, dn):
        """Retrieve an object from LDAP.

           The return type is ipaldap.Entry
        """
        searchfilter = "objectclass=*"
        sattrs = ["*", "aci", "attributeTypes", "objectClasses"]
        scope = ldap.SCOPE_BASE

        return self.conn.getList(dn, scope, searchfilter, sattrs)

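    # Illustrative example (definition name invented): a managed-entry
    # definition stored at
    #   cn=UPG Definition,cn=Managed Entries,cn=plugins,cn=config
    # is re-created under
    #   cn=UPG Definition,cn=Definitions,cn=Managed Entries,cn=etc,dc=example,dc=com
    # and the cn=config copy is queued for deletion.
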
    def __update_managed_entries(self):
        """Update and move legacy Managed Entry Plugins."""

        suffix = ipautil.realm_to_suffix(self.realm)
        searchfilter = '(objectclass=*)'
        definitions_managed_entries = []
        old_template_container = 'cn=etc,%s' % suffix
        old_definition_container = 'cn=Managed Entries,cn=plugins,cn=config'
        new = 'cn=Managed Entries,cn=etc,%s' % suffix
        sub = ['cn=Definitions,', 'cn=Templates,']
        new_managed_entries = []
        old_templates = []
        template = None
        try:
            definitions_managed_entries = self.conn.getList(old_definition_container, ldap.SCOPE_ONELEVEL, searchfilter, [])
        except errors.NotFound, e:
            return new_managed_entries
        for entry in definitions_managed_entries:
            new_definition = {}
            definition_managed_entry_updates = {}
            old_definition = {'dn': entry.dn, 'deleteentry': ['dn: %s' % entry.dn]}
            old_template = entry.getValue('managedtemplate')
            entry.setValues('managedtemplate', entry.getValue('managedtemplate').replace(old_template_container, sub[1] + new))
            new_definition['dn'] = entry.dn.replace(old_definition_container, sub[0] + new)
            new_definition['default'] = str(entry).strip().replace(': ', ':').split('\n')[1:]
            definition_managed_entry_updates[new_definition['dn']] = new_definition
            definition_managed_entry_updates[old_definition['dn']] = old_definition
            old_templates.append(old_template)
            new_managed_entries.append(definition_managed_entry_updates)
        for old_template in old_templates:
            try:
                template = self.conn.getEntry(old_template, ldap.SCOPE_BASE, searchfilter, [])
                new_template = {}
                template_managed_entry_updates = {}
                old_template = {'dn': template.dn, 'deleteentry': ['dn: %s' % template.dn]}
                new_template['dn'] = template.dn.replace(old_template_container, sub[1] + new)
                new_template['default'] = str(template).strip().replace(': ', ':').split('\n')[1:]
                template_managed_entry_updates[new_template['dn']] = new_template
                template_managed_entry_updates[old_template['dn']] = old_template
                new_managed_entries.append(template_managed_entry_updates)
            except errors.NotFound, e:
                pass
        if len(new_managed_entries) > 0:
            new_managed_entries.sort(reverse=True)

        return new_managed_entries

    def __apply_updates(self, updates, entry):
        """updates is a list of changes to apply
           entry is the thing to apply them to

           Returns the modified entry
        """
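        # Each update string has the "type:attr:value" shape produced by
        # parse_update_file(), e.g. (illustrative, values invented):
        #   add:objectClass:ipaConfigObject
        #   replace:nsslapd-minssf:0::1
        # For replace, the value itself carries "old::new".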
        if not updates:
            return entry

        only = {}
        for u in updates:
            # We already do syntax-parsing so this is safe
            (utype, k, values) = u.split(':', 2)
            values = self.__parse_values(values)

            e = entry.getValues(k)
            if not isinstance(e, list):
                if e is None:
                    e = []
                else:
                    e = [e]
            for v in values:
                if utype == 'remove':
                    root_logger.debug("remove: '%s' from %s, current value %s", v, k, e)
                    try:
                        e.remove(v)
                    except ValueError:
                        root_logger.warning("remove: '%s' not in %s", v, k)
                    entry.setValues(k, e)
                    root_logger.debug('remove: updated value %s', e)
                elif utype == 'add':
                    root_logger.debug("add: '%s' to %s, current value %s", v, k, e)
                    # Remove it, ignoring errors so we can blindly add it later
                    try:
                        e.remove(v)
                    except ValueError:
                        pass
                    e.append(v)
                    root_logger.debug('add: updated value %s', e)
                    entry.setValues(k, e)
                elif utype == 'addifnew':
                    root_logger.debug("addifnew: '%s' to %s, current value %s", v, k, e)
                    # Only add the attribute if it doesn't exist. Only works
                    # with single-value attributes.
                    if len(e) == 0:
                        e.append(v)
                        root_logger.debug('addifnew: set %s to %s', k, e)
                        entry.setValues(k, e)
                elif utype == 'addifexist':
                    root_logger.debug("addifexist: '%s' to %s, current value %s", v, k, e)
                    # Only add the attribute if the entry exists. We
                    # determine this based on whether it has an objectclass
                    if entry.getValues('objectclass'):
                        e.append(v)
                        root_logger.debug('addifexist: set %s to %s', k, e)
                        entry.setValues(k, e)
                elif utype == 'only':
                    root_logger.debug("only: set %s to '%s', current value %s", k, v, e)
                    if only.get(k):
                        e.append(v)
                    else:
                        e = [v]
                        only[k] = True
                    entry.setValues(k, e)
                    root_logger.debug('only: updated value %s', e)
                elif utype == 'deleteentry':
                    # skip this update type, it is handled in __delete_record()
                    return None
                elif utype == 'replace':
                    # v has the format "old::new"
                    try:
                        (old, new) = v.split('::', 1)
                    except ValueError:
                        raise BadSyntax, "bad syntax in replace, needs to be in the format old::new in %s" % v
                    try:
                        e.remove(old)
                        e.append(new)
                        root_logger.debug('replace: updated value %s', e)
                        entry.setValues(k, e)
                    except ValueError:
                        root_logger.debug('replace: %s not found, skipping', old)

            self.print_entity(entry)

        return entry

    def print_entity(self, e, message=None):
        """The entity object currently lacks a str() method"""
        root_logger.debug("---------------------------------------------")
        if message:
            root_logger.debug("%s", message)
        root_logger.debug("dn: " + e.dn)
        attr = e.attrList()
        for a in attr:
            value = e.getValues(a)
            if isinstance(value, str):
                root_logger.debug(a + ": " + value)
            else:
                root_logger.debug(a + ": ")
                for l in value:
                    root_logger.debug("\t" + l)

    def is_schema_updated(self, s):
        """Compare the schema in 's' with the current schema in the DS to
           see if anything has changed. This should account for syntax
           differences (like added parens that make no difference but are
           detected as a change by generateModList()).

           This doesn't handle re-ordering of attributes. They are still
           detected as changes, so foo $ bar != bar $ foo.

           return True if the schema has changed
           return False if it has not
        """
        s = ldap.schema.SubSchema(s)
        s = s.ldap_entry()

        # Get a fresh copy and convert into a SubSchema
        n = self.__get_entry("cn=schema")[0]
        n = dict(n.data)
        n = ldap.schema.SubSchema(n)
        n = n.ldap_entry()

        if s == n:
            return False
        else:
            return True

    def __update_record(self, update):
        found = False

        new_entry = self.__create_default_entry(update.get('dn'),
                                                update.get('default'))

        try:
            e = self.__get_entry(new_entry.dn)
            if len(e) > 1:
                # we should only ever get back one entry
                raise BadSyntax, "More than 1 entry returned on a dn search!? %s" % new_entry.dn
            entry = self.__entry_to_entity(e[0])
            found = True
            root_logger.info("Updating existing entry: %s", entry.dn)
        except errors.NotFound:
            # Doesn't exist, start with the default entry
            entry = new_entry
            root_logger.info("New entry: %s", entry.dn)
        except errors.DatabaseError:
            # Doesn't exist, start with the default entry
            entry = new_entry
            root_logger.info("New entry, using default value: %s", entry.dn)

        self.print_entity(entry)

        # Bring this entry up to date
        entry = self.__apply_updates(update.get('updates'), entry)
        if entry is None:
            # It might be None if it is just deleting an entry
            return

        self.print_entity(entry, "Final value")

        if not found:
            # New entries get their orig_data set to the entry itself. We want
            # to empty that so that everything appears new when generating the
            # modlist
            # entry.orig_data = {}
            try:
                if self.live_run:
                    if len(entry.toTupleList()) > 0:
                        # addifexist may result in an entry with only a
                        # dn defined. In that case there is nothing to do.
                        # It means the entry doesn't exist, so skip it.
                        self.conn.addEntry(entry)
                self.modified = True
            except Exception, e:
                root_logger.error("Add failure %s", e)
        else:
            # Update LDAP
            try:
                updated = False
                changes = self.conn.generateModList(entry.origDataDict(), entry.toDict())
                if (entry.dn == "cn=schema"):
                    updated = self.is_schema_updated(entry.toDict())
                else:
                    if len(changes) >= 1:
                        updated = True
                root_logger.debug("%s" % changes)
                root_logger.debug("Live %d, updated %d" % (self.live_run, updated))
                if self.live_run and updated:
                    self.conn.updateEntry(entry.dn, entry.origDataDict(), entry.toDict())
                root_logger.info("Done")
            except errors.EmptyModlist:
                root_logger.info("Entry already up-to-date")
                updated = False
            except errors.DatabaseError, e:
                root_logger.error("Update failed: %s", e)
                updated = False
            except errors.ACIError, e:
                root_logger.error("Update failed: %s", e)
                updated = False

            if ("cn=index" in entry.dn and
                "cn=userRoot" in entry.dn):
                taskid = self.create_index_task(entry.cn)
                self.monitor_index_task(taskid)

            if updated:
                self.modified = True
        return

    def __delete_record(self, updates):
        """
        Run through all the updates again looking for any that should be
        deleted.

        This must use a reversed list so that the longest entries are
        considered first so we don't end up trying to delete a parent
        and child in the wrong order.
        """
        dn = updates['dn']
        deletes = updates.get('deleteentry', [])
        for d in deletes:
            try:
                root_logger.info("Deleting entry %s", dn)
                if self.live_run:
                    self.conn.deleteEntry(dn)
                self.modified = True
            except errors.NotFound, e:
                root_logger.info("%s did not exist:%s", dn, e)
                self.modified = True
            except errors.DatabaseError, e:
                root_logger.error("Delete failed: %s", e)

        updates = updates.get('updates', [])
        for u in updates:
            # We already do syntax-parsing so this is safe
            (utype, k, values) = u.split(':', 2)

            if utype == 'deleteentry':
                try:
                    root_logger.info("Deleting entry %s", dn)
                    if self.live_run:
                        self.conn.deleteEntry(dn)
                    self.modified = True
                except errors.NotFound, e:
                    root_logger.info("%s did not exist:%s", dn, e)
                    self.modified = True
                except errors.DatabaseError, e:
                    root_logger.error("Delete failed: %s", e)

        return

    def get_all_files(self, root, recursive=False):
        """Get all update files"""
        f = []
        for path, subdirs, files in os.walk(root):
            for name in files:
                if fnmatch.fnmatch(name, "*.update"):
                    f.append(os.path.join(path, name))
            if not recursive:
                break
        f.sort()
        return f

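    # Update files carry numeric prefixes (e.g. a name like 10-config.update
    # -- illustrative), so the f.sort() above yields a deterministic apply
    # order across the files in UPDATES_DIR.
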
    def create_connection(self):
        if self.online:
            if self.ldapi:
                self.conn = ipaldap.IPAdmin(ldapi=True, realm=self.realm)
            else:
                self.conn = ipaldap.IPAdmin(self.sub_dict['FQDN'],
                                            ldapi=False,
                                            realm=self.realm)
            try:
                if self.dm_password:
                    self.conn.do_simple_bind(binddn="cn=directory manager", bindpw=self.dm_password)
                elif os.getegid() == 0:
                    try:
                        # autobind
                        self.conn.do_external_bind(self.pw_name)
                    except errors.NotFound:
                        # Fall back
                        self.conn.do_sasl_gssapi_bind()
                else:
                    self.conn.do_sasl_gssapi_bind()
            except ldap.LOCAL_ERROR, e:
                raise RuntimeError('%s' % e.args[0].get('info', '').strip())
        else:
            raise RuntimeError("Offline updates are not supported.")

    def __run_updates(self, dn_list, all_updates):
        # For adds and updates we want to apply updates from shortest
        # to greatest length of the DN. For deletes we want the reverse.
        sortedkeys = dn_list.keys()
        sortedkeys.sort()
        for k in sortedkeys:
            for dn in dn_list[k]:
                self.__update_record(all_updates[dn])

        sortedkeys.reverse()
        for k in sortedkeys:
            for dn in dn_list[k]:
                self.__delete_record(all_updates[dn])

    def update(self, files):
        """Execute the update. files is a list of the update files to use.

           returns True if anything was changed, otherwise False
        """

        updates = None
        if self.plugins:
            root_logger.info('PRE_UPDATE')
            updates = api.Backend.updateclient.update(PRE_UPDATE, self.dm_password, self.ldapi, self.live_run)

        try:
            self.create_connection()
            all_updates = {}
            dn_list = {}
            # Start with any updates passed in from pre-update plugins
            if updates:
                for entry in updates:
                    all_updates.update(entry)
                for upd in updates:
                    for dn in upd:
                        dn_explode = ldap.explode_dn(dn.lower())
                        l = len(dn_explode)
                        if dn_list.get(l):
                            if dn not in dn_list[l]:
                                dn_list[l].append(dn)
                        else:
                            dn_list[l] = [dn]

            for f in files:
                try:
                    root_logger.info("Parsing file %s" % f)
                    data = self.read_file(f)
                except Exception, e:
                    print e
                    sys.exit(1)

                (all_updates, dn_list) = self.parse_update_file(data, all_updates, dn_list)

            self.__run_updates(dn_list, all_updates)
        finally:
            if self.conn: self.conn.unbind()

        if self.plugins:
            root_logger.info('POST_UPDATE')
            updates = api.Backend.updateclient.update(POST_UPDATE, self.dm_password, self.ldapi, self.live_run)
            dn_list = {}
            # Merge the per-plugin dicts into one dn-keyed dict, the shape
            # that __run_updates() expects.
            all_updates = {}
            for upd in updates:
                all_updates.update(upd)
                for dn in upd:
                    dn_explode = ldap.explode_dn(dn.lower())
                    l = len(dn_explode)
                    if dn_list.get(l):
                        if dn not in dn_list[l]:
                            dn_list[l].append(dn)
                    else:
                        dn_list[l] = [dn]
            self.__run_updates(dn_list, all_updates)

        return self.modified

    def update_from_dict(self, dn_list, updates):
        """
        Apply updates internally as opposed to from a file.

        dn_list is a dict of DN lists keyed by DN length, as built by
        parse_update_file()
        updates is a dictionary containing the updates
        """
        if not self.conn:
            self.create_connection()

        self.__run_updates(dn_list, updates)

        return self.modified
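
# Illustrative sketch (values invented) of driving update_from_dict()
# without touching the filesystem, as the update plugins do:
#   ld = LDAPUpdate(dm_password, ldapi=True)
#   dn_list = {1: ['cn=schema']}
#   updates = {'cn=schema': {'dn': 'cn=schema',
#                            'updates': ['add:attributeTypes:( ... )']}}
#   ld.update_from_dict(dn_list, updates)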