2007-09-04 15:13:15 -05:00
|
|
|
# Authors: Simo Sorce <ssorce@redhat.com>
|
2007-08-24 12:31:45 -05:00
|
|
|
#
|
2016-05-07 01:03:21 -05:00
|
|
|
# Copyright (C) 2007-2016 Red Hat, Inc.
|
2007-08-24 12:31:45 -05:00
|
|
|
# see file 'COPYING' for use and warranty information
|
|
|
|
#
|
2010-12-09 06:59:11 -06:00
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
2007-08-24 12:31:45 -05:00
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
2010-12-09 06:59:11 -06:00
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2007-08-24 12:31:45 -05:00
|
|
|
#
|
|
|
|
|
2015-08-12 06:44:11 -05:00
|
|
|
from __future__ import print_function
|
|
|
|
|
2017-01-12 09:20:43 -06:00
|
|
|
import codecs
|
2017-05-24 09:35:07 -05:00
|
|
|
import logging
|
2007-09-04 15:13:15 -05:00
|
|
|
import string
|
|
|
|
import tempfile
|
|
|
|
import subprocess
|
2008-01-22 10:42:45 -06:00
|
|
|
import random
|
2016-12-21 08:07:34 -06:00
|
|
|
import math
|
2016-03-20 15:21:10 -05:00
|
|
|
import os
|
|
|
|
import sys
|
2011-04-26 14:51:34 -05:00
|
|
|
import copy
|
0000-12-31 18:09:24 -05:50
|
|
|
import shutil
|
2011-05-22 12:17:07 -05:00
|
|
|
import socket
|
0000-12-31 18:09:24 -05:50
|
|
|
import re
|
2007-09-04 15:44:59 -05:00
|
|
|
import datetime
|
2011-05-27 13:17:22 -05:00
|
|
|
import netaddr
|
2016-04-13 09:14:42 -05:00
|
|
|
import netifaces
|
2012-05-24 10:23:36 -05:00
|
|
|
import time
|
2014-01-16 07:10:42 -06:00
|
|
|
import pwd
|
2015-06-26 05:44:20 -05:00
|
|
|
import grp
|
2015-08-10 11:29:33 -05:00
|
|
|
from contextlib import contextmanager
|
2015-11-25 10:17:18 -06:00
|
|
|
import locale
|
|
|
|
import collections
|
2015-08-10 11:29:33 -05:00
|
|
|
|
2015-12-16 09:06:03 -06:00
|
|
|
from dns import resolver, reversename
|
2015-12-16 06:37:39 -06:00
|
|
|
from dns.exception import DNSException
|
2015-12-16 09:06:03 -06:00
|
|
|
|
2015-08-10 11:29:33 -05:00
|
|
|
import six
|
2015-08-11 10:07:11 -05:00
|
|
|
from six.moves import input
|
2015-09-14 05:52:29 -05:00
|
|
|
from six.moves import urllib
|
2012-03-20 11:29:36 -05:00
|
|
|
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equalty comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 06:36:35 -05:00
|
|
|
from ipapython.dn import DN
|
2012-07-03 09:49:10 -05:00
|
|
|
|
2017-05-24 09:35:07 -05:00
|
|
|
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)


# only for OTP password that is manually retyped by user
TMP_PWD_ENTROPY_BITS = 128
|
2014-05-29 07:47:17 -05:00
|
|
|
|
2007-08-24 12:31:45 -05:00
|
|
|
|
2016-11-29 11:19:07 -06:00
|
|
|
# Human-readable protocol names keyed by socket type constant.
PROTOCOL_NAMES = {
    socket.SOCK_STREAM: 'tcp',
    socket.SOCK_DGRAM: 'udp'
}
|
|
|
|
|
2017-06-14 07:47:23 -05:00
|
|
|
# Pairs a local network interface with its network details; returned by
# CheckedIPAddress.get_matching_interface().
InterfaceDetails = collections.namedtuple(
    'InterfaceDetails', [
        'name',  # interface name
        'ifnet'  # network details of interface (netaddr.IPNetwork)
    ])
|
|
|
|
|
2016-11-29 11:19:07 -06:00
|
|
|
|
2016-06-30 13:41:48 -05:00
|
|
|
class UnsafeIPAddress(netaddr.IPAddress):
    """Any valid IP address with or without netmask.

    "Unsafe" because no policy constraints (loopback, reserved, ...) are
    enforced here; see CheckedIPAddress for the constrained variant.
    """

    # Use inet_pton() rather than inet_aton() for IP address parsing. We
    # will use the same function in IPv4/IPv6 conversions + be stricter
    # and don't allow IP addresses such as '1.1.1' in the same time
    netaddr_ip_flags = netaddr.INET_PTON

    def __init__(self, addr):
        """Parse *addr* into an IP address, remembering any netmask.

        :param addr: an UnsafeIPAddress, netaddr.IPAddress,
            netaddr.IPNetwork, or anything convertible to str
        """
        # Copy constructor: keep the source's netmask information.
        if isinstance(addr, UnsafeIPAddress):
            self._net = addr._net
            super(UnsafeIPAddress, self).__init__(addr,
                                                  flags=self.netaddr_ip_flags)
            return

        elif isinstance(addr, netaddr.IPAddress):
            self._net = None  # no information about netmask
            super(UnsafeIPAddress, self).__init__(addr,
                                                  flags=self.netaddr_ip_flags)
            return

        elif isinstance(addr, netaddr.IPNetwork):
            # A network: keep it and use its address part.
            self._net = addr
            super(UnsafeIPAddress, self).__init__(self._net.ip,
                                                  flags=self.netaddr_ip_flags)
            return

        # option of last resort: parse it as string
        self._net = None
        addr = str(addr)
        try:
            try:
                addr = netaddr.IPAddress(addr, flags=self.netaddr_ip_flags)
            except netaddr.AddrFormatError:
                # netaddr.IPAddress doesn't handle zone indices in textual
                # IPv6 addresses. Try removing zone index and parse the
                # address again.
                addr, sep, _foo = addr.partition('%')
                if sep != '%':
                    # no zone index present -> re-raise original error
                    raise
                addr = netaddr.IPAddress(addr, flags=self.netaddr_ip_flags)
                if addr.version != 6:
                    # zone indices are only meaningful for IPv6
                    raise
        except ValueError:
            # Not a plain address; try parsing as address/netmask and
            # remember the network.  NOTE(review): this relies on
            # netaddr.AddrFormatError being a ValueError subclass.
            self._net = netaddr.IPNetwork(addr, flags=self.netaddr_ip_flags)
            addr = self._net.ip
        super(UnsafeIPAddress, self).__init__(addr,
                                              flags=self.netaddr_ip_flags)

    def __getstate__(self):
        """Pickle support: include subclass-only ``_net`` next to base state."""
        state = {
            '_net': self._net,
            'super_state': super(UnsafeIPAddress, self).__getstate__(),
        }
        return state

    def __setstate__(self, state):
        """Pickle support: restore base class state, then ``_net``."""
        super(UnsafeIPAddress, self).__setstate__(state['super_state'])
        self._net = state['_net']
|
|
|
|
|
2016-06-30 13:41:48 -05:00
|
|
|
|
|
|
|
class CheckedIPAddress(UnsafeIPAddress):
    """IPv4 or IPv6 address with additional constraints.

    Reserved or link-local addresses are never accepted.
    """
    def __init__(self, addr, parse_netmask=True,
                 allow_loopback=False, allow_multicast=False):
        """Validate and wrap an IP address.

        :param addr: address to parse (string, netaddr object, or another
            UnsafeIPAddress/CheckedIPAddress instance)
        :param parse_netmask: when False, input carrying a netmask/prefix
            length is rejected
        :param allow_loopback: accept loopback addresses
        :param allow_multicast: accept multicast addresses
        :raises ValueError: if the address cannot be parsed or violates
            one of the constraints
        """
        try:
            super(CheckedIPAddress, self).__init__(addr)
        except netaddr.core.AddrFormatError as e:
            # normalize parse failures to ValueError for callers
            raise ValueError(e)

        # Copy constructor fast path: source was already validated.
        if isinstance(addr, CheckedIPAddress):
            self.prefixlen = addr.prefixlen
            return

        if not parse_netmask and self._net:
            raise ValueError(
                "netmask and prefix length not allowed here: {}".format(addr))

        if self.version not in (4, 6):
            raise ValueError("unsupported IP version {}".format(self.version))

        if not allow_loopback and self.is_loopback():
            raise ValueError("cannot use loopback IP address {}".format(addr))
        # Loopback is excluded from the reserved check so that the
        # allow_loopback flag alone decides its fate; addresses in
        # netaddr's IPV4_6TO4 network are also rejected.
        if (not self.is_loopback() and self.is_reserved()) \
                or self in netaddr.ip.IPV4_6TO4:
            raise ValueError(
                "cannot use IANA reserved IP address {}".format(addr))

        if self.is_link_local():
            raise ValueError(
                "cannot use link-local IP address {}".format(addr))

        if not allow_multicast and self.is_multicast():
            raise ValueError("cannot use multicast IP address {}".format(addr))

        # No netmask supplied: derive a default network. IPv4 uses
        # netaddr's abbreviated-CIDR expansion, IPv6 defaults to /64.
        if self._net is None:
            if self.version == 4:
                self._net = netaddr.IPNetwork(
                    netaddr.cidr_abbrev_to_verbose(str(self)))
            elif self.version == 6:
                self._net = netaddr.IPNetwork(str(self) + '/64')

        self.prefixlen = self._net.prefixlen

    def __getstate__(self):
        """Pickle support: include ``prefixlen`` next to parent state."""
        state = {
            'prefixlen': self.prefixlen,
            'super_state': super(CheckedIPAddress, self).__getstate__(),
        }
        return state

    def __setstate__(self, state):
        """Pickle support: restore parent state, then ``prefixlen``."""
        super(CheckedIPAddress, self).__setstate__(state['super_state'])
        self.prefixlen = state['prefixlen']

    def is_network_addr(self):
        # True when this address equals its network's base address.
        return self == self._net.network

    def is_broadcast_addr(self):
        # Only IPv4 networks have a broadcast address.
        return self.version == 4 and self == self._net.broadcast

    def get_matching_interface(self):
        """Find matching local interface for address

        :return: InterfaceDetails named tuple or None if no interface has
            this address
        """
        logger.debug("Searching for an interface of IP address: %s", self)
        if self.version == 4:
            family = netifaces.AF_INET
        elif self.version == 6:
            family = netifaces.AF_INET6
        else:
            raise ValueError(
                "Unsupported address family ({})".format(self.version)
            )

        for interface in netifaces.interfaces():
            for ifdata in netifaces.ifaddresses(interface).get(family, []):

                # link-local addresses contain '%suffix' that causes parse
                # errors in IPNetwork
                ifaddr = ifdata['addr'].split(u'%', 1)[0]

                # newer versions of netifaces provide IPv6 netmask in format
                # 'ffff:ffff:ffff:ffff::/64'. We have to split and use prefix
                # or the netmask with older versions
                ifmask = ifdata['netmask'].split(u'/')[-1]

                ifaddrmask = '{addr}/{netmask}'.format(
                    addr=ifaddr,
                    netmask=ifmask
                )
                logger.debug(
                    "Testing local IP address: %s (interface: %s)",
                    ifaddrmask, interface)

                ifnet = netaddr.IPNetwork(ifaddrmask)

                if ifnet.ip == self:
                    return InterfaceDetails(interface, ifnet)
        return None

    def set_ip_net(self, ifnet):
        """Set IP Network details for this address. IPNetwork is valid only
        locally, so this should be set only for local IP addresses

        :param ifnet: netaddr.IPNetwork object with information about IP
            network where particular address belongs locally
        """
        assert isinstance(ifnet, netaddr.IPNetwork)
        self._net = ifnet
|
2017-05-31 08:50:05 -05:00
|
|
|
|
2011-05-27 13:17:22 -05:00
|
|
|
|
2017-10-23 06:45:56 -05:00
|
|
|
class CheckedIPAddressLoopback(CheckedIPAddress):
    """CheckedIPAddress variant that additionally permits loopback IPs.

    Reserved or link-local addresses are still rejected; when a loopback
    address is actually supplied, a warning is emitted on stderr.
    """

    def __init__(self, addr, parse_netmask=True, allow_multicast=False):
        super(CheckedIPAddressLoopback, self).__init__(
            addr,
            allow_loopback=True,
            parse_netmask=parse_netmask,
            allow_multicast=allow_multicast,
        )

        if self.is_loopback():
            # print is being used instead of a logger, because at this
            # moment, in execution process, there is no logger configured
            warning = "WARNING: You are using a loopback IP: {}".format(addr)
            print(warning, file=sys.stderr)
|
|
|
|
|
|
|
|
|
2011-07-25 10:14:01 -05:00
|
|
|
def valid_ip(addr):
    """Return True if *addr* is a syntactically valid IPv4 or IPv6 address."""
    if netaddr.valid_ipv4(addr):
        return True
    return netaddr.valid_ipv6(addr)
|
|
|
|
|
2011-09-30 03:09:55 -05:00
|
|
|
def format_netloc(host, port=None):
    """
    Format network location (host:port).

    If the host part is a literal IPv6 address, it must be enclosed in square
    brackets (RFC 2732).
    """
    host = str(host)
    try:
        # Probe whether the host is a literal IPv6 address.
        socket.inet_pton(socket.AF_INET6, host)
    except socket.error:
        # Not IPv6 -> leave the host as-is.
        pass
    else:
        host = '[%s]' % host
    if port is None:
        return host
    return '%s:%s' % (host, str(port))
|
|
|
|
|
2007-09-04 15:13:15 -05:00
|
|
|
def realm_to_suffix(realm_name):
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equalty comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 06:36:35 -05:00
|
|
|
'Convert a kerberos realm to a IPA suffix.'
|
2007-09-04 15:13:15 -05:00
|
|
|
s = realm_name.split(".")
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equalty comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts its arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 06:36:35 -05:00
|
|
|
suffix_dn = DN(*[('dc', x.lower()) for x in s])
|
|
|
|
return suffix_dn
|
|
|
|
|
|
|
|
def suffix_to_realm(suffix_dn):
    """Convert an IPA suffix DN into a Kerberos realm name.

    Each RDN value of the suffix becomes one dot-separated component,
    e.g. a suffix of dc=example,dc=com yields 'example.com'.
    """
    assert isinstance(suffix_dn, DN)
    return '.'.join(rdn.value for rdn in suffix_dn)
|
2007-09-04 15:13:15 -05:00
|
|
|
|
|
|
|
def template_str(txt, vars):
    """Perform $-substitution on *txt* and evaluate embedded eval() calls.

    :param txt: template text in ``string.Template`` syntax
    :param vars: mapping of substitution variable names to values
    :return: the substituted text
    :raises KeyError: if the template references a variable missing from
        *vars* (``Template.substitute`` is strict)

    WARNING: any ``eval(...)`` expression embedded in the template is
    executed by the Python interpreter, so templates must only ever come
    from a trusted source.
    """
    val = string.Template(txt).substitute(vars)

    # eval() is a special string one can insert into a template to have the
    # Python interpreter evaluate the string. This is intended to allow
    # math to be performed in templates.
    # Fixed: the pattern must be a raw string; '\s' and '\(' in a plain
    # string literal are invalid escape sequences on Python 3.6+.
    pattern = re.compile(r'(eval\s*\(([^()]*)\))')
    val = pattern.sub(lambda x: str(eval(x.group(2))), val)

    return val
|
2007-09-04 15:13:15 -05:00
|
|
|
|
|
|
|
def template_file(infilename, vars):
    """Return the contents of *infilename* with template substitutions
    applied (see template_str)."""
    with open(infilename) as template:
        contents = template.read()
    return template_str(contents, vars)
|
|
|
|
|
|
|
|
def copy_template_file(infilename, outfilename, vars):
    """Copy *infilename* to *outfilename*, performing template
    substitutions on the way (see template_file)."""
    substituted = template_file(infilename, vars)
    with open(outfilename, 'w') as out:
        out.write(substituted)
|
|
|
|
|
2007-09-04 15:13:15 -05:00
|
|
|
|
|
|
|
def write_tmp_file(txt):
    """Write *txt* into a fresh named temporary file and return the open
    file object.

    The file is deleted automatically once the returned object is closed
    or garbage-collected, so the caller must keep a reference for as long
    as the file is needed.
    """
    tmpfile = tempfile.NamedTemporaryFile('w+')
    tmpfile.write(txt)
    tmpfile.flush()
    return tmpfile
|
|
|
|
|
2017-11-08 08:15:30 -06:00
|
|
|
|
|
|
|
def flush_sync(f):
    """Flush and fsync a file to disk, including its directory entry.

    :param f: a file object with fileno and name
    """
    # push application-level buffers down to the file descriptor
    f.flush()
    # push the kernel's page cache for the file to stable storage
    os.fsync(f.fileno())
    # finally sync the containing directory so the metadata is durable too
    parent = os.path.dirname(os.path.abspath(f.name))
    dir_fd = os.open(parent, os.O_RDONLY | os.O_DIRECTORY)
    try:
        os.fsync(dir_fd)
    finally:
        os.close(dir_fd)
|
|
|
|
|
|
|
|
|
2011-09-26 01:27:01 -05:00
|
|
|
def shell_quote(string):
    """Quote *string* for safe use on a POSIX shell command line.

    Accepts either str or bytes and returns the same type: the value is
    wrapped in single quotes, and every embedded single quote is replaced
    by the '\\'' escape sequence.
    """
    if isinstance(string, str):
        quote, escaped = "'", "'\\''"
    else:
        quote, escaped = b"'", b"'\\''"
    return quote + string.replace(quote, escaped) + quote
|
|
|
|
|
|
|
|
|
|
|
|
class _RunResult(collections.namedtuple('_RunResult',
|
|
|
|
'output error_output returncode')):
|
|
|
|
"""Result of ipautil.run"""
|
|
|
|
|
|
|
|
|
2018-02-16 05:14:11 -06:00
|
|
|
class CalledProcessError(subprocess.CalledProcessError):
    """CalledProcessError with stderr

    Hold stderr of failed call and print it in repr() to simplify debugging.
    """

    def __init__(self, returncode, cmd, output=None, stderr=None):
        super(CalledProcessError, self).__init__(returncode, cmd, output)
        # keep stderr ourselves so it is available regardless of what the
        # base class stores
        self.stderr = stderr

    def __str__(self):
        msg = '{}(Command {!s} returned non-zero exit status {!r}'.format(
            self.__class__.__name__, self.cmd, self.returncode)
        if self.stderr is not None:
            msg += ': {!r}'.format(self.stderr)
        return msg + ')'

    __repr__ = __str__
|
|
|
|
|
|
|
|
|
2015-11-25 10:17:18 -06:00
|
|
|
def run(args, stdin=None, raiseonerr=True, nolog=(), env=None,
        capture_output=False, skip_output=False, cwd=None,
        runas=None, suplementary_groups=[],
        capture_error=False, encoding=None, redirect_output=False, umask=None):
    """
    Execute an external command.

    :param args: List of arguments for the command
    :param stdin: Optional input to the command
    :param raiseonerr: If True, raises an exception if the return code is
        not zero
    :param nolog: Tuple of strings that shouldn't be logged, like passwords.
        Each tuple consists of a string to be replaced by XXXXXXXX.

        Example:
        We have a command
            ['/usr/bin/setpasswd', '--password', 'Secret123', 'someuser']
        and we don't want to log the password so nolog would be set to:
        ('Secret123',)
        The resulting log output would be:

        /usr/bin/setpasswd --password XXXXXXXX someuser

        If a value isn't found in the list it is silently ignored.
    :param env: Dictionary of environment variables passed to the command.
        When None, current environment is copied
    :param capture_output: Capture stdout
    :param skip_output: Redirect the output to /dev/null and do not log it
    :param cwd: Current working directory
    :param runas: Name of a user that the command should be run as. The spawned
        process will have both real and effective UID and GID set.
    :param suplementary_groups: List of group names that will be used as
        supplementary groups for the subprocess.
        The option runas must be specified together with this option.
    :param capture_error: Capture stderr
    :param encoding: For Python 3, the encoding to use for output,
        error_output, and (if it's not bytes) stdin.
        If None, the current encoding according to locale is used.
    :param redirect_output: Redirect (error) output to standard (error) output.
    :param umask: Set file-creation mask before running the command.

    :return: An object with these attributes:

        `returncode`: The process' exit status

        `output` and `error_output`: captured output, as strings. Under
        Python 3, these are encoded with the given `encoding`.
        None unless `capture_output` or `capture_error`, respectively, are
        given

        `raw_output`, `raw_error_output`: captured output, as bytes.

        `output_log` and `error_log`: The captured output, as strings, with any
        unencodable characters discarded. These should only be used
        for logging or error messages.

        If skip_output is given, all output-related attributes on the result
        (that is, all except `returncode`) are None.

        For backwards compatibility, the return value can also be used as a
        (output, error_output, returncode) triple.
    """
    # NOTE(review): mutable default argument; safe here only because
    # suplementary_groups is read, never mutated, in this function.
    assert isinstance(suplementary_groups, list)
    p_in = None
    p_out = None
    p_err = None

    if isinstance(nolog, six.string_types):
        # We expect a tuple (or list, or other iterable) of nolog strings.
        # Passing just a single string is bad: strings are iterable, so this
        # would result in every individual character of that string being
        # replaced by XXXXXXXX.
        # This is a sanity check to prevent that.
        raise ValueError('nolog must be a tuple of strings.')

    # The three output modes (capture, skip, redirect) are mutually exclusive.
    if skip_output and (capture_output or capture_error):
        raise ValueError('skip_output is incompatible with '
                         'capture_output or capture_error')

    if redirect_output and (capture_output or capture_error):
        raise ValueError('redirect_output is incompatible with '
                         'capture_output or capture_error')

    if skip_output and redirect_output:
        raise ValueError('skip_output is incompatible with redirect_output')

    if env is None:
        # copy default env
        env = copy.deepcopy(os.environ)
        env["PATH"] = "/bin:/sbin:/usr/kerberos/bin:/usr/kerberos/sbin:/usr/bin:/usr/sbin"
    if stdin:
        p_in = subprocess.PIPE
    if skip_output:
        p_out = p_err = open(os.devnull, 'w')
    elif redirect_output:
        p_out = sys.stdout
        p_err = sys.stderr
    else:
        p_out = subprocess.PIPE
        p_err = subprocess.PIPE

    if encoding is None:
        encoding = locale.getpreferredencoding()

    # communicate() needs bytes; encode a str stdin up front
    if six.PY3 and isinstance(stdin, str):
        stdin = stdin.encode(encoding)

    # Scrub secrets from the logged command line before anything is logged.
    arg_string = nolog_replace(repr(args), nolog)
    logger.debug('Starting external process')
    logger.debug('args=%s', arg_string)

    def preexec_fn():
        # Runs in the child process between fork() and exec(): drop
        # privileges to `runas` (with its supplementary groups) and
        # optionally set the umask.
        if runas is not None:
            pent = pwd.getpwnam(runas)

            suplementary_gids = [
                grp.getgrnam(sgroup).gr_gid for sgroup in suplementary_groups
            ]

            logger.debug('runas=%s (UID %d, GID %s)', runas,
                         pent.pw_uid, pent.pw_gid)
            if suplementary_groups:
                for group, gid in zip(suplementary_groups, suplementary_gids):
                    logger.debug('suplementary_group=%s (GID %d)', group, gid)

            # set groups first, while we still have the privilege to do so
            os.setgroups(suplementary_gids)
            os.setregid(pent.pw_gid, pent.pw_gid)
            os.setreuid(pent.pw_uid, pent.pw_uid)

        if umask:
            os.umask(umask)

    try:
        # pylint: disable=subprocess-popen-preexec-fn
        p = subprocess.Popen(args, stdin=p_in, stdout=p_out, stderr=p_err,
                             close_fds=True, env=env, cwd=cwd,
                             preexec_fn=preexec_fn)
        stdout, stderr = p.communicate(stdin)
    except KeyboardInterrupt:
        # reap the child before propagating the interrupt
        logger.debug('Process interrupted')
        p.wait()
        raise
    except:
        # bare except is deliberate: log and re-raise whatever happened
        logger.debug('Process execution failed')
        raise
    finally:
        if skip_output:
            # close the /dev/null handle opened above
            p_out.close()   # pylint: disable=E1103

    logger.debug('Process finished, return code=%s', p.returncode)

    # The command and its output may include passwords that we don't want
    # to log. Replace those.
    if skip_output or redirect_output:
        output_log = None
        error_log = None
    else:
        if six.PY3:
            # lossy decode: log strings must never raise on odd bytes
            output_log = stdout.decode(locale.getpreferredencoding(),
                                       errors='replace')
        else:
            output_log = stdout
        if six.PY3:
            error_log = stderr.decode(locale.getpreferredencoding(),
                                      errors='replace')
        else:
            error_log = stderr
        output_log = nolog_replace(output_log, nolog)
        logger.debug('stdout=%s', output_log)
        error_log = nolog_replace(error_log, nolog)
        logger.debug('stderr=%s', error_log)

    if capture_output:
        if six.PY2:
            output = stdout
        else:
            # strict decode here, unlike the log variants above
            output = stdout.decode(encoding)
    else:
        output = None

    if capture_error:
        if six.PY2:
            error_output = stderr
        else:
            error_output = stderr.decode(encoding)
    else:
        error_output = None

    if p.returncode != 0 and raiseonerr:
        raise CalledProcessError(
            p.returncode, arg_string, output_log, error_log
        )

    result = _RunResult(output, error_output, p.returncode)
    result.raw_output = stdout
    result.raw_error_output = stderr
    result.output_log = output_log
    result.error_log = error_log
    return result
|
2008-01-11 04:36:25 -06:00
|
|
|
|
2012-08-23 11:38:45 -05:00
|
|
|
|
|
|
|
def nolog_replace(string, nolog):
    """Return *string* with every occurrence of the `nolog` secrets
    replaced by XXXXXXXX.

    Each secret is also matched in its shell-quoted and URL-quoted forms
    so it cannot leak through either encoding.
    """
    masked = string
    for secret in nolog:
        # skip empty values and anything that is not a string
        if not secret or not isinstance(secret, six.string_types):
            continue

        variants = (shell_quote(secret), secret, urllib.parse.quote(secret))
        for variant in variants:
            masked = masked.replace(variant, 'XXXXXXXX')
    return masked
|
|
|
|
|
|
|
|
|
0000-12-31 18:09:24 -05:50
|
|
|
def install_file(fname, dest):
    """Move *fname* into place at *dest*, keeping a .orig backup of any
    regular file already there.

    The move is done as copy+remove rather than a rename so that the
    destination gets the right SELinux context.
    """
    if os.path.isfile(dest):
        os.rename(dest, dest + ".orig")
    shutil.copy(fname, dest)
    os.remove(fname)
|
|
|
|
|
0000-12-31 18:09:24 -05:50
|
|
|
|
|
|
|
def backup_file(fname):
    """Rename *fname* to *fname*.orig if it is a regular file; do nothing
    otherwise."""
    if not os.path.isfile(fname):
        return
    os.rename(fname, fname + ".orig")
|
|
|
|
|
2008-08-07 15:14:37 -05:00
|
|
|
|
2007-08-24 12:31:45 -05:00
|
|
|
class CIDict(dict):
    """
    Case-insensitive but case-respecting dictionary.

    Keys are compared case-insensitively (stored lowercased in the
    underlying dict) while the original spelling is remembered in
    ``self._keys`` and returned by iteration.

    This code is derived from python-ldap's cidict.py module,
    written by stroeder: http://python-ldap.sourceforge.net/

    This version extends 'dict' so it works properly with TurboGears.
    If you extend UserDict, isinstance(foo, dict) returns false.
    """

    def __init__(self, default=None, **kwargs):
        super(CIDict, self).__init__()
        self._keys = {}  # mapping of lowercased keys to proper case
        if default:
            self.update(default)
        if kwargs:
            self.update(kwargs)

    def __getitem__(self, key):
        return super(CIDict, self).__getitem__(key.lower())

    def __setitem__(self, key, value, seen_keys=None):
        """cidict[key] = value

        The ``seen_keys`` argument is used by ``update()`` to keep track of
        duplicate keys. It should be an initially empty set that is
        passed to all calls to __setitem__ that should not set duplicate keys.
        """
        lower_key = key.lower()
        if seen_keys is not None:
            if lower_key in seen_keys:
                raise ValueError('Duplicate key in update: %s' % key)
            seen_keys.add(lower_key)
        self._keys[lower_key] = key
        return super(CIDict, self).__setitem__(lower_key, value)

    def __delitem__(self, key):
        lower_key = key.lower()
        del self._keys[lower_key]
        return super(CIDict, self).__delitem__(lower_key)

    def update(self, new=None, **kwargs):
        """Update self from dict/iterable new and kwargs

        Functions like ``dict.update()``.

        Neither ``new`` nor ``kwargs`` may contain two keys that only differ in
        case, as this situation would result in loss of data.
        """
        seen = set()
        if new:
            try:
                keys = new.keys
            except AttributeError:
                # `new` is an iterable of pairs; normalize via a plain dict
                self.update(dict(new))
            else:
                for key in keys():
                    self.__setitem__(key, new[key], seen)
        # kwargs get their own duplicate-tracking set
        seen = set()
        for key, value in kwargs.items():
            self.__setitem__(key, value, seen)

    def __contains__(self, key):
        return super(CIDict, self).__contains__(key.lower())

    if six.PY2:
        def has_key(self, key):
            # pylint: disable=no-member
            return super(CIDict, self).has_key(key.lower())
            # pylint: enable=no-member

    def get(self, key, failobj=None):
        try:
            return self[key]
        except KeyError:
            return failobj

    def __iter__(self):
        # iterate over the original-case keys
        return six.itervalues(self._keys)

    def keys(self):
        # NOTE: on Python 3 this returns an iterator, not a view
        if six.PY2:
            return list(self.iterkeys())
        else:
            return self.iterkeys()

    def items(self):
        if six.PY2:
            return list(self.iteritems())
        else:
            return self.iteritems()

    def values(self):
        if six.PY2:
            return list(self.itervalues())
        else:
            return self.itervalues()

    def copy(self):
        """Returns a shallow copy of this CIDict"""
        return CIDict(list(self.items()))

    def iteritems(self):
        return ((k, self[k]) for k in six.itervalues(self._keys))

    def iterkeys(self):
        return six.itervalues(self._keys)

    def itervalues(self):
        return (v for k, v in six.iteritems(self))

    def setdefault(self, key, value=None):
        try:
            return self[key]
        except KeyError:
            self[key] = value
            return value

    def pop(self, key, *args):
        try:
            value = self[key]
            del self[key]
            return value
        except KeyError:
            if len(args) == 1:
                return args[0]
            raise

    def popitem(self):
        (lower_key, value) = super(CIDict, self).popitem()
        key = self._keys[lower_key]
        del self._keys[lower_key]

        return (key, value)

    def clear(self):
        self._keys.clear()
        return super(CIDict, self).clear()

    def viewitems(self):
        raise NotImplementedError('CIDict.viewitems is not implemented')

    def viewkeys(self):
        raise NotImplementedError('CIDict.viewkeys is not implemented')

    def viewvalues(self):
        # Fixed: this stub was previously misspelled 'viewvvalues'.
        raise NotImplementedError('CIDict.viewvalues is not implemented')

    # backward-compatible alias for the historical misspelling
    viewvvalues = viewvalues
|
|
|
|
|
2007-08-24 12:31:45 -05:00
|
|
|
|
2007-09-04 15:44:59 -05:00
|
|
|
class GeneralizedTimeZone(datetime.tzinfo):
    """This class is a basic timezone wrapper for the offset specified
    in a Generalized Time. It is dst-ignorant."""

    def __init__(self, offsetstr="Z"):
        """Parse a Generalized Time offset string.

        :param offsetstr: "Z" for UTC, or "+HH"/"-HH" optionally followed
            by "MM" (e.g. "+0530").
        :raises ValueError: if characters remain after the hour/minute
            offset has been consumed.
        """
        super(GeneralizedTimeZone, self).__init__()

        self.name = offsetstr
        self.houroffset = 0
        self.minoffset = 0

        if offsetstr == "Z":
            self.houroffset = 0
            self.minoffset = 0
        else:
            if (len(offsetstr) >= 3) and re.match(r'[-+]\d\d', offsetstr):
                self.houroffset = int(offsetstr[0:3])
                offsetstr = offsetstr[3:]
            if (len(offsetstr) >= 2) and re.match(r'\d\d', offsetstr):
                self.minoffset = int(offsetstr[0:2])
                offsetstr = offsetstr[2:]
            if len(offsetstr) > 0:
                raise ValueError()
            # the sign only appears on the hour part; propagate it to the
            # minutes so the total offset has a consistent sign
            if self.houroffset < 0:
                self.minoffset *= -1

    def utcoffset(self, dt):
        return datetime.timedelta(hours=self.houroffset,
                                  minutes=self.minoffset)

    def dst(self, dt=None):
        # Fixed: the tzinfo API calls dst(dt); the previous signature took
        # no dt argument, so datetime.dst() raised TypeError. The default
        # keeps any existing no-argument callers working.
        return datetime.timedelta(0)

    def tzname(self, dt=None):
        # Fixed: same signature bug as dst() above.
        return self.name
|
|
|
|
|
|
|
|
|
|
|
|
def parse_generalized_time(timestr):
    """Parse a Generalized Time string (as specified in X.680) and
    return a datetime object.  Generalized Times are stored inside
    the krbPasswordExpiration attribute in LDAP.

    This method doesn't attempt to be perfect wrt timezones. If python
    can't be bothered to implement them, how can we...

    :param timestr: Generalized Time string, e.g. '20201231235959Z'
    :return: a datetime.datetime instance, or None when the string is
        shorter than the mandatory 'YYYYMMDD' prefix or malformed
    """
    if len(timestr) < 8:
        return None
    try:
        date = timestr[:8]
        # 'rest' is consumed left to right below.  (Renamed from 'time',
        # which shadowed the imported time module.)
        rest = timestr[8:]

        year = int(date[:4])
        month = int(date[4:6])
        day = int(date[6:8])

        # Defaults for the optional time-of-day components.  'usec' is
        # the datetime microsecond argument (it was misleadingly named
        # 'msec' before); 'minute' avoids shadowing the builtin min().
        hour = minute = sec = usec = 0
        tzone = None

        # Optional hours 'HH', possibly followed by a ','/'.' fraction
        # that encodes minutes and seconds.
        if (len(rest) >= 2) and re.match(r'\d', rest[0]):
            hour = int(rest[:2])
            rest = rest[2:]
            if len(rest) >= 2 and (rest[0] == "," or rest[0] == "."):
                hour_fraction = "."
                rest = rest[1:]
                while (len(rest) > 0) and re.match(r'\d', rest[0]):
                    hour_fraction += rest[0]
                    rest = rest[1:]
                total_secs = int(float(hour_fraction) * 3600)
                minute, sec = divmod(total_secs, 60)

        # Optional minutes 'MM', possibly with a fraction encoding seconds.
        if (len(rest) >= 2) and re.match(r'\d', rest[0]):
            minute = int(rest[:2])
            rest = rest[2:]
            if len(rest) >= 2 and (rest[0] == "," or rest[0] == "."):
                min_fraction = "."
                rest = rest[1:]
                while (len(rest) > 0) and re.match(r'\d', rest[0]):
                    min_fraction += rest[0]
                    rest = rest[1:]
                sec = int(float(min_fraction) * 60)

        # Optional seconds 'SS', possibly with a fraction encoding
        # microseconds.
        if (len(rest) >= 2) and re.match(r'\d', rest[0]):
            sec = int(rest[:2])
            rest = rest[2:]
            if len(rest) >= 2 and (rest[0] == "," or rest[0] == "."):
                sec_fraction = "."
                rest = rest[1:]
                while (len(rest) > 0) and re.match(r'\d', rest[0]):
                    sec_fraction += rest[0]
                    rest = rest[1:]
                usec = int(float(sec_fraction) * 1000000)

        # Anything left over is a timezone designator ('Z' or '+-HH[MM]').
        if len(rest) > 0:
            tzone = GeneralizedTimeZone(rest)

        return datetime.datetime(year, month, day, hour, minute, sec,
                                 usec, tzone)

    except ValueError:
        return None
|
2007-11-03 11:22:20 -05:00
|
|
|
|
2016-12-21 08:07:34 -06:00
|
|
|
|
|
|
|
def ipa_generate_password(entropy_bits=256, uppercase=1, lowercase=1, digits=1,
                          special=1, min_len=0):
    """
    Generate token containing at least `entropy_bits` bits and with the given
    character restraints.

    :param entropy_bits:
        The minimal number of entropy bits attacker has to guess:
        128 bits entropy: secure
        256 bits of entropy: secure enough if you care about quantum
        computers
    :param uppercase: minimal count of ASCII uppercase letters, or None
        to exclude uppercase letters entirely
    :param lowercase: minimal count of ASCII lowercase letters, or None
        to exclude lowercase letters entirely
    :param digits: minimal count of digits, or None to exclude digits
    :param special: minimal count of special characters, or None to
        exclude special characters
    :param min_len: minimal total length of the generated token

    Integer values specify minimal number of characters from given
    character class and length.
    Value None prevents given character from appearing in the token.

    Example:
    ipa_generate_password(uppercase=3, lowercase=3, digits=0, special=None)

    At least 3 upper and 3 lower case ASCII chars, may contain digits,
    no special chars.
    """
    special_chars = '!$%&()*+,-./:;<>?@[]^_{|}~'
    # Each class records its character pool and the entropy (in bits)
    # contributed by one character drawn uniformly from it: log2(pool size).
    pwd_charsets = {
        'uppercase': {
            'chars': string.ascii_uppercase,
            'entropy': math.log(len(string.ascii_uppercase), 2)
        },
        'lowercase': {
            'chars': string.ascii_lowercase,
            'entropy': math.log(len(string.ascii_lowercase), 2)
        },
        'digits': {
            'chars': string.digits,
            'entropy': math.log(len(string.digits), 2)
        },
        'special': {
            'chars': special_chars,
            'entropy': math.log(len(special_chars), 2)
        },
    }
    req_classes = dict(
        uppercase=uppercase,
        lowercase=lowercase,
        digits=digits,
        special=special
    )
    # 'all' class is used when adding entropy to too-short tokens
    # it contains characters from all allowed classes
    pwd_charsets['all'] = {
        'chars': ''.join([
            charclass['chars'] for charclass_name, charclass
            in pwd_charsets.items()
            if req_classes[charclass_name] is not None
        ])
    }
    pwd_charsets['all']['entropy'] = math.log(
        len(pwd_charsets['all']['chars']), 2)
    # SystemRandom draws from os.urandom(), appropriate for secrets.
    rnd = random.SystemRandom()

    todo_entropy = entropy_bits
    password = u''
    # Generate required character classes:
    # The order of generated characters is fixed to comply with check in
    # NSS function sftk_newPinCheck() in nss/lib/softoken/fipstokn.c.
    for charclass_name in ['digits', 'uppercase', 'lowercase', 'special']:
        charclass = pwd_charsets[charclass_name]
        todo_characters = req_classes[charclass_name]
        # None means this class is excluded from the token altogether.
        if todo_characters is None:
            continue
        while todo_characters > 0:
            password += rnd.choice(charclass['chars'])
            todo_entropy -= charclass['entropy']
            todo_characters -= 1

    # required character classes do not provide sufficient entropy
    # or does not fulfill minimal length constraint
    allchars = pwd_charsets['all']
    while todo_entropy > 0 or len(password) < min_len:
        password += rnd.choice(allchars['chars'])
        todo_entropy -= allchars['entropy']

    return password
|
|
|
|
|
0000-12-31 18:09:24 -05:50
|
|
|
|
2008-07-21 05:25:37 -05:00
|
|
|
def user_input(prompt, default = None, allow_empty = True):
    """Prompt on stdin and return the user's (stripped) answer.

    The type of `default` selects the input mode:
    - None: free-form string; EOF returns '' when allow_empty, otherwise
      raises RuntimeError
    - str: free-form string with a default shown in brackets
    - bool: yes/no question (only the first letter, 'y'/'n', is checked)
    - int: keeps prompting until an integer (or empty for default) is given

    NOTE: the bool branch is tested before the int branch on purpose —
    bool is a subclass of int in Python.

    :param prompt: text shown to the user (without trailing colon)
    :param default: default value, also selecting the mode above
    :param allow_empty: whether an empty answer is acceptable
    """
    if default is None:
        # Free-form input with no default; loop until acceptable.
        while True:
            try:
                ret = input("%s: " % prompt)
                if allow_empty or ret.strip():
                    return ret.strip()
            except EOFError:
                if allow_empty:
                    return ''
                raise RuntimeError("Failed to get user input")

    # six.string_types covers both py2 str/unicode and py3 str.
    if isinstance(default, six.string_types):
        while True:
            try:
                ret = input("%s [%s]: " % (prompt, default))
                # Empty answer accepts the default (when permitted).
                if not ret and (allow_empty or default):
                    return default
                elif ret.strip():
                    return ret.strip()
            except EOFError:
                return default

    if isinstance(default, bool):
        choice = "yes" if default else "no"
        while True:
            try:
                ret = input("%s [%s]: " % (prompt, choice))
                ret = ret.strip()
                if not ret:
                    return default
                # Only the first letter is significant: 'y...'/'n...'.
                elif ret.lower()[0] == "y":
                    return True
                elif ret.lower()[0] == "n":
                    return False
            except EOFError:
                return default

    if isinstance(default, int):
        while True:
            try:
                ret = input("%s [%s]: " % (prompt, default))
                ret = ret.strip()
                if not ret:
                    return default
                ret = int(ret)
            except ValueError:
                # Not an integer: re-prompt.
                pass
            except EOFError:
                return default
            else:
                return ret

    # Unsupported default type (e.g. float): nothing to ask.
    return None
|
|
|
|
|
2007-11-26 19:59:53 -06:00
|
|
|
|
2016-11-29 11:19:07 -06:00
|
|
|
def host_port_open(host, port, socket_type=socket.SOCK_STREAM,
                   socket_timeout=None, log_errors=False,
                   log_level=logging.DEBUG):
    """
    host: either hostname or IP address;
    if hostname is provided, port MUST be open on ALL resolved IPs

    returns True is port is open, False otherwise
    """
    result = True

    # The port only counts as open when it is reachable on every address
    # the host resolves to.
    addrinfos = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket_type)
    for family, stype, protocol, _canonname, sockaddr in addrinfos:
        sock = None
        try:
            sock = socket.socket(family, stype, protocol)

            if socket_timeout is not None:
                sock.settimeout(socket_timeout)

            sock.connect(sockaddr)

            # UDP connect() never fails; probe with an empty datagram and
            # wait for a reply to find out whether anything is listening.
            if socket_type == socket.SOCK_DGRAM:
                sock.send(b'')
                sock.recv(512)
        except socket.error:
            result = False
            if log_errors:
                msg = ('Failed to connect to port %(port)s %(proto)s on '
                       '%(addr)s' % dict(port=port,
                                         proto=PROTOCOL_NAMES[socket_type],
                                         addr=sockaddr[0]))
                logger.log(log_level, msg)
        finally:
            if sock is not None:
                sock.close()

    return result
|
2011-05-22 12:17:07 -05:00
|
|
|
|
2012-05-11 07:38:09 -05:00
|
|
|
|
2018-03-28 05:05:34 -05:00
|
|
|
def check_port_bindable(port, socket_type=socket.SOCK_STREAM):
    """Check if a port is free and not bound by any other application

    :param port: port number
    :param socket_type: type (SOCK_STREAM for TCP, SOCK_DGRAM for UDP)

    Returns True if the port is free, False otherwise
    """
    proto_by_type = {socket.SOCK_STREAM: 'TCP', socket.SOCK_DGRAM: 'UDP'}
    if socket_type not in proto_by_type:
        raise ValueError(socket_type)
    proto = proto_by_type[socket_type]

    # Detect dual stack or IPv4 single stack
    try:
        sock = socket.socket(socket.AF_INET6, socket_type)
        anyaddr = '::'
        logger.debug(
            "check_port_bindable: Checking IPv4/IPv6 dual stack and %s",
            proto
        )
    except socket.error:
        sock = socket.socket(socket.AF_INET, socket_type)
        anyaddr = ''
        logger.debug("check_port_bindable: Checking IPv4 only and %s", proto)

    # Attempt to bind
    try:
        if socket_type == socket.SOCK_STREAM:
            # reuse TCP sockets in TIME_WAIT state
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        sock.bind((anyaddr, port))
    except socket.error as exc:
        logger.debug(
            "check_port_bindable: failed to bind to port %i/%s: %s",
            port, proto, exc
        )
        bindable = False
    else:
        logger.debug(
            "check_port_bindable: bind success: %i/%s", port, proto
        )
        bindable = True
    finally:
        sock.close()
    return bindable
|
|
|
|
|
|
|
|
|
2015-12-02 08:20:50 -06:00
|
|
|
def reverse_record_exists(ip_address):
    """
    Checks if IP address have some reverse record somewhere.
    Does not care where it points.

    Returns True/False
    """
    ptr_name = reversename.from_address(str(ip_address))
    try:
        resolver.query(ptr_name, "PTR")
    except DNSException:
        # really don't care what exception, PTR is simply unresolvable
        return False
    else:
        return True
|
|
|
|
|
|
|
|
|
Refactor backup_and_replace_hostname() into a flexible config modification tool
backup_and_replace_hostname() was doing three things:
1. Given config file in 'key=value' style, replace value for a
specified key (HOSTNAME)
2. Backup original file and install a replacement
3. Restore original security context after editing
We have several more places where parts of the functionality are needed,
thus making two tools in ipapython.ipautil:
1. config_replace_variables(filepath, replacevars=dict(),
appendvars=dict())
Replaces or appends values to specified keys, adding new key=value
pairs if key was absent
2. backup_config_and_replace_variables(fstore, filepath,
replacevars=dict(),
appendvars=dict())
Backs up the config file and calls config_replace_variables()
A caller must handle security context after using these two tools.
In addition, as before, there is
ipapython.services.backup_and_replace_hostname() that uses
these common tools and restores security context after editing.
The code will be used extensively for systemd integration for Fedora 16.
Fixes:
https://fedorahosted.org/freeipa/ticket/1871
2011-10-12 08:42:09 -05:00
|
|
|
def config_replace_variables(filepath, replacevars=dict(), appendvars=dict()):
    """
    Take a key=value based configuration file, and write new version
    with certain values replaced or appended

    All (key,value) pairs from replacevars and appendvars that were not found
    in the configuration file, will be added there.

    It is responsibility of a caller to ensure that replacevars and
    appendvars do not overlap.

    It is responsibility of a caller to back up file.

    :param filepath: path of the key=value configuration file to rewrite
    :param replacevars: keys whose values are replaced completely
    :param appendvars: keys whose values are appended to (space-separated)
        unless already present in the current value

    returns dictionary of affected keys and their previous values

    One have to run restore_context(filepath) afterwards or
    security context of the file will not be correct after modification
    """
    # BUGFIX: the pattern must be a raw string — it contains regex escapes
    # such as \s and \# which are invalid escape sequences in a plain
    # string literal (DeprecationWarning, SyntaxWarning on newer Python).
    pattern = re.compile(r'''
(^
                        \s*
        (?P<option>     [^\#;]+?)
                        (\s*=\s*)
        (?P<value>      .+?)?
                        (\s*((\#|;).*)?)?
$)''', re.VERBOSE)
    orig_stat = os.stat(filepath)
    old_values = dict()
    temp_filename = None
    with tempfile.NamedTemporaryFile(mode="w", delete=False) as new_config:
        temp_filename = new_config.name
        with open(filepath, 'r') as f:
            for line in f:
                new_line = line
                m = pattern.match(line)
                if m:
                    option, value = m.group('option', 'value')
                    if option is not None:
                        if replacevars and option in replacevars:
                            # replace value completely
                            new_line = u"%s=%s\n" % (option, replacevars[option])
                            old_values[option] = value
                        if appendvars and option in appendvars:
                            # append new value unless it is already existing in the original one
                            if not value:
                                new_line = u"%s=%s\n" % (option, appendvars[option])
                            elif value.find(appendvars[option]) == -1:
                                new_line = u"%s=%s %s\n" % (option, value, appendvars[option])
                            old_values[option] = value
                new_config.write(new_line)
        # Now add all options from replacevars and appendvars that were not found in the file
        new_vars = replacevars.copy()
        new_vars.update(appendvars)
        newvars_view = set(new_vars.keys()) - set(old_values.keys())
        append_view = (set(appendvars.keys()) - newvars_view)
        # NOTE(review): appendvars keys that WERE found in the file end up
        # in append_view and get written again here, producing a duplicate
        # line at the end of the file.  Looks unintended — confirm before
        # changing, callers may rely on the current output.
        for item in newvars_view:
            new_config.write("%s=%s\n" % (item, new_vars[item]))
        for item in append_view:
            new_config.write("%s=%s\n" % (item, appendvars[item]))
        new_config.flush()
        # Make sure the resulting file is readable by others before installing it
        os.fchmod(new_config.fileno(), orig_stat.st_mode)
        os.fchown(new_config.fileno(), orig_stat.st_uid, orig_stat.st_gid)

    # At this point new_config is closed but not removed due to 'delete=False' above
    # Now, install the temporary file as configuration and ensure old version is available as .orig
    # While .orig file is not used during uninstall, it is left there for administrator.
    install_file(temp_filename, filepath)

    return old_values
|
|
|
|
|
|
|
|
def inifile_replace_variables(filepath, section, replacevars=dict(), appendvars=dict()):
    """
    Take a section-structured key=value based configuration file, and write new version
    with certain values replaced or appended within the section

    All (key,value) pairs from replacevars and appendvars that were not found
    in the configuration file, will be added there.

    It is responsibility of a caller to ensure that replacevars and
    appendvars do not overlap.

    It is responsibility of a caller to back up file.

    :param filepath: path of the ini-style configuration file to rewrite
    :param section: section name (matched case-insensitively) to edit
    :param replacevars: keys whose values are replaced completely
    :param appendvars: keys whose values are appended to (space-separated)
        unless already present in the current value

    returns dictionary of affected keys and their previous values

    One have to run restore_context(filepath) afterwards or
    security context of the file will not be correct after modification
    """
    # BUGFIX: the pattern must be a raw string — it contains regex escapes
    # such as \[, \s and \# which are invalid escape sequences in a plain
    # string literal (DeprecationWarning, SyntaxWarning on newer Python).
    pattern = re.compile(r'''
(^
                        \[
        (?P<section>    .+) \]
                        (\s+((\#|;).*)?)?
$)|(^
                        \s*
        (?P<option>     [^\#;]+?)
                        (\s*=\s*)
        (?P<value>      .+?)?
                        (\s*((\#|;).*)?)?
$)''', re.VERBOSE)

    def add_options(config, replacevars, appendvars, oldvars):
        # add all options from replacevars and appendvars that were not found in the file
        new_vars = replacevars.copy()
        new_vars.update(appendvars)
        newvars_view = set(new_vars.keys()) - set(oldvars.keys())
        append_view = (set(appendvars.keys()) - newvars_view)
        for item in newvars_view:
            config.write("%s=%s\n" % (item, new_vars[item]))
        for item in append_view:
            config.write("%s=%s\n" % (item, appendvars[item]))

    orig_stat = os.stat(filepath)
    old_values = dict()
    temp_filename = None
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as new_config:
        temp_filename = new_config.name
        with open(filepath, 'r') as f:
            in_section = False
            finished = False
            # (removed dead 'line_idx' counter: it was incremented but
            # never read)
            for line in f:
                new_line = line
                m = pattern.match(line)
                if m:
                    sect, option, value = m.group('section', 'option', 'value')
                    if in_section and sect is not None:
                        # End of the searched section, add remaining options
                        add_options(new_config, replacevars, appendvars, old_values)
                        finished = True
                    if sect is not None:
                        # New section is found, check whether it is the one we are looking for
                        in_section = (str(sect).lower() == str(section).lower())
                    if option is not None and in_section:
                        # Great, this is an option from the section we are looking for
                        if replacevars and option in replacevars:
                            # replace value completely
                            new_line = u"%s=%s\n" % (option, replacevars[option])
                            old_values[option] = value
                        if appendvars and option in appendvars:
                            # append a new value unless it is already existing in the original one
                            if not value:
                                new_line = u"%s=%s\n" % (option, appendvars[option])
                            elif value.find(appendvars[option]) == -1:
                                new_line = u"%s=%s %s\n" % (option, value, appendvars[option])
                            old_values[option] = value
                new_config.write(new_line)
            # We have finished parsing the original file.
            # There are two remaining cases:
            # 1. Section we were looking for was not found, we need to add it.
            if not (in_section or finished):
                new_config.write("[%s]\n" % (section))
            # 2. The section is the last one but some options were not found, add them.
            if in_section or not finished:
                add_options(new_config, replacevars, appendvars, old_values)

        new_config.flush()
        # Make sure the resulting file is readable by others before installing it
        os.fchmod(new_config.fileno(), orig_stat.st_mode)
        os.fchown(new_config.fileno(), orig_stat.st_uid, orig_stat.st_gid)

    # At this point new_config is closed but not removed due to 'delete=False' above
    # Now, install the temporary file as configuration and ensure old version is available as .orig
    # While .orig file is not used during uninstall, it is left there for administrator.
    install_file(temp_filename, filepath)

    return old_values
|
|
|
|
|
2012-07-19 08:07:23 -05:00
|
|
|
def backup_config_and_replace_variables(
        fstore, filepath, replacevars=dict(), appendvars=dict()):
    """
    Take a key=value based configuration file, back up it, and
    write new version with certain values replaced or appended

    All (key,value) pairs from replacevars and appendvars that
    were not found in the configuration file, will be added there.
    The file must exist before this function is called.

    It is responsibility of a caller to ensure that replacevars and
    appendvars do not overlap.

    returns dictionary of affected keys and their previous values

    One have to run restore_context(filepath) afterwards or
    security context of the file will not be correct after modification
    """
    # Preserve the original file in the file store, then delegate the
    # actual rewrite.
    fstore.backup_file(filepath)
    return config_replace_variables(filepath, replacevars, appendvars)
|
2011-12-07 01:50:31 -06:00
|
|
|
|
2012-04-18 10:22:35 -05:00
|
|
|
|
2012-05-24 10:23:36 -05:00
|
|
|
def wait_for_open_ports(host, ports, timeout=0):
    """
    Wait until the specified port(s) on the remote host are open. Timeout
    in seconds may be specified to limit the wait. If the timeout is
    exceeded, socket.timeout exception is raised.
    """
    timeout = float(timeout)
    if not isinstance(ports, (tuple, list)):
        ports = [ports]

    logger.debug('wait_for_open_ports: %s %s timeout %d', host, ports, timeout)
    deadline = time.time() + timeout

    for port in ports:
        logger.debug('waiting for port: %s', port)
        # Log connection errors only on the first attempt per port so the
        # log stays readable during long waits.
        should_log = True
        while not host_port_open(host, port, log_errors=should_log):
            should_log = False
            if timeout and time.time() > deadline:  # timeout exceeded
                raise socket.timeout("Timeout exceeded")
            time.sleep(1)
        logger.debug('SUCCESS: port: %s', port)
|
2012-05-24 10:23:36 -05:00
|
|
|
|
2017-08-03 09:03:29 -05:00
|
|
|
|
2012-05-24 10:23:36 -05:00
|
|
|
def wait_for_open_socket(socket_name, timeout=0):
    """
    Wait until the specified socket on the local host is open. Timeout
    in seconds may be specified to limit the wait.

    :param socket_name: filesystem path of the UNIX domain socket
    :param timeout: maximum seconds to wait; 0 means wait forever

    Retries once per second while the socket is missing (ENOENT) or
    refuses connections (ECONNREFUSED); any other socket error is
    re-raised immediately.  When the timeout is exceeded, the last
    socket.error is raised.
    """
    import errno  # local import: errno is not in this module's top-level imports
    timeout = float(timeout)
    op_timeout = time.time() + timeout

    while True:
        s = socket.socket(socket.AF_UNIX)
        try:
            s.connect(socket_name)
            break
        except socket.error as e:
            # Named constants instead of the former magic numbers (2, 111).
            if e.errno in (errno.ENOENT, errno.ECONNREFUSED):
                if timeout and time.time() > op_timeout:  # timeout exceeded
                    raise
                time.sleep(1)
            else:
                raise
        finally:
            # BUGFIX: always close the socket; the original only closed it
            # on success, leaking one file descriptor per retry.
            s.close()
|
2012-07-11 14:51:01 -05:00
|
|
|
|
2015-03-16 10:28:54 -05:00
|
|
|
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implemented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attributes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equality comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplify the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now sensitive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistinguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 06:36:35 -05:00
|
|
|
def dn_attribute_property(private_name):
    '''
    Build a property that guarantees an attribute is either None or a DN.

    The setter converts any non-None value to a DN; the getter asserts
    the stored value is None or a DN instance before returning it (the
    asserts may be disabled in production).

    The private_name parameter names the class-internal attribute the
    property shadows.

    Example: given ``base_dn = dn_attribute_property('_base_dn')`` on a
    class, its ``base_dn`` attribute can only ever be None or a DN, with
    the actual value held in ``_base_dn``.
    '''

    def getter(self):
        dn = getattr(self, private_name)
        if dn is not None:
            assert isinstance(dn, DN)
        return dn

    def setter(self, value):
        # Promote anything non-None to a DN before storing it.
        setattr(self, private_name, None if value is None else DN(value))

    return property(getter, setter)
|
2012-12-05 03:50:05 -06:00
|
|
|
|
2015-09-23 06:27:35 -05:00
|
|
|
def posixify(string):
    """
    Convert a string to a stricter alpha-numeric representation.

    - Alpha-numeric, underscore, dot and dash characters are accepted
    - Space is converted to underscore
    - Other characters are omitted
    - Leading dash is stripped

    Note: This mapping is not one-to-one and may map different input to
    the same result. When using posixify, make sure you do not map two
    different entities to one unintentionally.
    """
    # NOTE(review): the parameter shadows the stdlib 'string' module
    # imported at file level; kept as-is to preserve the signature.
    kept = []
    for char in string:
        if char == ' ':
            # Spaces become underscores instead of being dropped
            kept.append('_')
        elif char.isalnum() or char in '_.-':
            kept.append(char)
        # any other character is silently omitted

    # A leading dash is not allowed in the result
    return ''.join(kept).lstrip('-')
|
2015-06-04 06:59:22 -05:00
|
|
|
|
|
|
|
@contextmanager
def private_ccache(path=None):
    """
    Context manager that points KRB5CCNAME at a private credential cache.

    If *path* is None a temporary directory is created and the ccache
    path placed inside it. On exit the previous KRB5CCNAME value is
    restored (or the variable unset if it was not set before), the
    ccache file is removed if it exists, and any temporary directory
    created here is removed as well.

    :param path: explicit ccache path, or None to use a temporary one
    """
    tmp_dir = None
    if path is None:
        tmp_dir = tempfile.mkdtemp(prefix='krbcc')
        path = os.path.join(tmp_dir, 'ccache')

    saved_ccname = os.environ.get('KRB5CCNAME', None)
    os.environ['KRB5CCNAME'] = path

    try:
        yield path
    finally:
        # Restore the caller's environment exactly as it was.
        if saved_ccname is None:
            os.environ.pop('KRB5CCNAME', None)
        else:
            os.environ['KRB5CCNAME'] = saved_ccname

        if os.path.exists(path):
            os.remove(path)
        if tmp_dir is not None:
            try:
                os.rmdir(tmp_dir)
            except OSError:
                # best-effort cleanup: leave the dir if it is not empty
                pass
|
2015-08-31 02:08:38 -05:00
|
|
|
|
|
|
|
|
|
|
|
if six.PY2:
    def fsdecode(value):
        """
        Decode argument using the file system encoding, as returned by
        `sys.getfilesystemencoding()`.

        Text input is returned unchanged; bytes are decoded; anything
        else raises TypeError.
        """
        if isinstance(value, six.text_type):
            return value
        if isinstance(value, six.binary_type):
            return value.decode(sys.getfilesystemencoding())
        raise TypeError("expect {0} or {1}, not {2}".format(
            six.binary_type.__name__,
            six.text_type.__name__,
            type(value).__name__))
else:
    # Python 3 ships this in the stdlib.
    fsdecode = os.fsdecode  #pylint: disable=no-member
|
2016-06-27 03:23:14 -05:00
|
|
|
|
|
|
|
|
2016-09-23 08:53:41 -05:00
|
|
|
def unescape_seq(seq, *args):
    """
    unescape (remove '\\') all occurrences of sequence in input strings.

    :param seq: sequence to unescape
    :param args: input strings to process

    :returns: tuple of strings with unescaped sequences
    """
    # re.escape() is required here: without it a sequence containing
    # regex metacharacters (e.g. '.') would match far more than the
    # literal sequence, stripping backslashes it should not touch.
    unescape_re = re.compile(r'\\' + re.escape(seq))
    # Backslashes in the replacement are special to re.sub, so escape
    # them in case the sequence itself contains one.
    replacement = seq.replace('\\', r'\\')

    return tuple(unescape_re.sub(replacement, a) for a in args)
|
|
|
|
|
|
|
|
|
|
|
|
def escape_seq(seq, *args):
    """
    escape (prepend '\\') all occurrences of sequence in input strings

    :param seq: sequence to escape
    :param args: input strings to process

    :returns: tuple of strings with escaped sequences
    """
    # Compute the escaped form once instead of per input string.
    escaped = u'\\' + seq

    return tuple(text.replace(seq, escaped) for text in args)
|
2016-11-21 03:24:17 -06:00
|
|
|
|
|
|
|
|
2017-01-12 09:20:43 -06:00
|
|
|
def decode_json(data):
    """Decode JSON bytes to string with proper encoding

    Only for supporting Py 3.5

    Py 3.6 supports bytes as parameter for json.load, we can drop this when
    there is no need for python 3.5 anymore

    Code from:
        https://bugs.python.org/file43513/json_detect_encoding_3.patch

    :param data: JSON bytes
    :return: return JSON string
    """

    def detect_encoding(b):
        # Guess the Unicode encoding of the JSON bytes: an explicit BOM
        # wins, then the placement of NUL bytes in the first characters
        # (JSON text starts with ASCII) reveals UTF-16/32 endianness.
        bstartswith = b.startswith
        if bstartswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
            return 'utf-32'
        if bstartswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
            return 'utf-16'
        if bstartswith(codecs.BOM_UTF8):
            # '-sig' codec consumes the UTF-8 BOM during decoding
            return 'utf-8-sig'

        # No BOM; b[i] indexing yields ints on Python 3, so `not b[i]`
        # tests for a NUL byte.
        if len(b) >= 4:
            if not b[0]:
                # 00 00 -- -- - utf-32-be
                # 00 XX -- -- - utf-16-be
                return 'utf-16-be' if b[1] else 'utf-32-be'
            if not b[1]:
                # XX 00 00 00 - utf-32-le
                # XX 00 XX XX - utf-16-le
                return 'utf-16-le' if b[2] or b[3] else 'utf-32-le'
        elif len(b) == 2:
            if not b[0]:
                # 00 XX - utf-16-be
                return 'utf-16-be'
            if not b[1]:
                # XX 00 - utf-16-le
                return 'utf-16-le'
        # default
        return 'utf-8'

    # Already-decoded text passes through untouched.
    if isinstance(data, six.text_type):
        return data

    # 'surrogatepass' tolerates lone surrogates that a strict UTF codec
    # would reject.
    return data.decode(detect_encoding(data), 'surrogatepass')
|
|
|
|
|
|
|
|
|
2016-11-21 03:24:17 -06:00
|
|
|
class APIVersion(tuple):
    """API version parser and handler

    The class is used to parse ipapython.version.API_VERSION and plugin
    versions.

    Instances are (major, minor) tuples and therefore compare and sort
    the way tuples do.
    """
    __slots__ = ()

    def __new__(cls, version):
        """Parse a version string such as '2.30' or '2' (minor defaults to 0).

        :param version: text version, ``major[.minor]``
        :raises ValueError: if major or minor is not an integer
        """
        major, dot, minor = version.partition(u'.')
        major = int(major)
        minor = int(minor) if dot else 0
        return tuple.__new__(cls, (major, minor))

    def __str__(self):
        return '{}.{}'.format(*self)

    def __repr__(self):
        return "<APIVersion('{}.{}')>".format(*self)

    def __getnewargs__(self):
        # The pickle protocol requires a *tuple* of arguments for
        # __new__; returning a bare str (as before) broke pickling and
        # copying of APIVersion instances.
        return (str(self),)

    @property
    def major(self):
        return self[0]

    @property
    def minor(self):
        return self[1]
|