2008-12-10 22:14:05 -06:00
|
|
|
# Authors:
|
|
|
|
# Jason Gerard DeRose <jderose@redhat.com>
|
|
|
|
#
|
|
|
|
# Copyright (C) 2008 Red Hat
|
|
|
|
# see file 'COPYING' for use and warranty information
|
|
|
|
#
|
2010-12-09 06:59:11 -06:00
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
2008-12-10 22:14:05 -06:00
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
2010-12-09 06:59:11 -06:00
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
2008-12-10 22:14:05 -06:00
|
|
|
|
|
|
|
"""
|
|
|
|
Parameter system for command plugins.
|
2009-01-14 11:58:05 -06:00
|
|
|
|
2010-02-19 10:08:16 -06:00
|
|
|
A `Param` instance can be used to describe an argument or option that a command
|
|
|
|
takes, or an attribute that a command returns. The `Param` base class is not
|
|
|
|
used directly, but there are many subclasses for specific Python data types
|
|
|
|
(like `Str` or `Int`) and specific properties (like `Password`).
|
|
|
|
|
|
|
|
To create a `Param` instance, you must always provide the parameter *name*,
|
|
|
|
which should be the LDAP attribute name if the parameter describes the attribute
|
|
|
|
of an LDAP entry. For example, we could create an `Str` instance describing the user's last-name attribute like this:
|
|
|
|
|
|
|
|
>>> from ipalib import Str
|
|
|
|
>>> sn = Str('sn')
|
|
|
|
>>> sn.name
|
|
|
|
'sn'
|
|
|
|
|
|
|
|
When creating a `Param`, there are also a number of optional kwargs which
|
|
|
|
can provide additional meta-data and functionality. For example, every
|
|
|
|
parameter has a *cli_name*, the name used on the command-line-interface. By
|
|
|
|
default the *cli_name* is the same as the *name*:
|
|
|
|
|
|
|
|
>>> sn.cli_name
|
|
|
|
'sn'
|
|
|
|
|
|
|
|
But often the LDAP attribute name isn't user friendly for the command-line, so
|
|
|
|
you can override this with the *cli_name* kwarg:
|
|
|
|
|
|
|
|
>>> sn = Str('sn', cli_name='last')
|
|
|
|
>>> sn.name
|
|
|
|
'sn'
|
|
|
|
>>> sn.cli_name
|
|
|
|
'last'
|
|
|
|
|
|
|
|
Note that the RPC interfaces (and the internal processing pipeline) always use
|
|
|
|
the parameter *name*, regardless of what the *cli_name* might be.
|
|
|
|
|
|
|
|
A `Param` also has two translatable kwargs: *label* and *doc*. These must both
|
|
|
|
be `Gettext` instances. They both default to a place-holder `FixMe` instance,
|
|
|
|
a subclass of `Gettext` used to mark a missing translatable string:
|
|
|
|
|
|
|
|
>>> sn.label
|
|
|
|
FixMe('sn')
|
|
|
|
>>> sn.doc
|
|
|
|
FixMe('sn')
|
|
|
|
|
|
|
|
The *label* is a short phrase describing the parameter. It's used on the CLI
|
|
|
|
when interactively prompting for values, and as a label for form inputs in the
|
|
|
|
web-UI. The *label* should start with an initial capital. For example:
|
|
|
|
|
|
|
|
>>> from ipalib import _
|
|
|
|
>>> sn = Str('sn',
|
|
|
|
... cli_name='last',
|
|
|
|
... label=_('Last name'),
|
|
|
|
... )
|
|
|
|
>>> sn.label
|
2010-03-08 21:42:26 -06:00
|
|
|
Gettext('Last name', domain='ipa', localedir=None)
|
2010-02-19 10:08:16 -06:00
|
|
|
|
|
|
|
The *doc* is a longer description of the parameter. It's used on the CLI when
|
|
|
|
displaying the help information for a command, and as extra instruction for a
|
|
|
|
form input on the web-UI. By default the *doc* is the same as the *label*:
|
|
|
|
|
|
|
|
>>> sn.doc
|
2010-03-08 21:42:26 -06:00
|
|
|
Gettext('Last name', domain='ipa', localedir=None)
|
2010-02-19 10:08:16 -06:00
|
|
|
|
|
|
|
But you can override this with the *doc* kwarg. Like the *label*, the *doc*
|
|
|
|
should also start with an initial capital and should not end with any
|
|
|
|
punctuation. For example:
|
|
|
|
|
|
|
|
>>> sn = Str('sn',
|
|
|
|
... cli_name='last',
|
|
|
|
... label=_('Last name'),
|
|
|
|
... doc=_("The user's last name"),
|
|
|
|
... )
|
|
|
|
>>> sn.doc
|
2010-03-08 21:42:26 -06:00
|
|
|
Gettext("The user's last name", domain='ipa', localedir=None)
|
2010-02-19 10:08:16 -06:00
|
|
|
|
|
|
|
Demonstration aside, you should always provide at least the *label* so the
|
|
|
|
various UIs are translatable. Only provide the *doc* if the parameter needs
|
|
|
|
a more detailed description for clarity.
|
2008-12-10 22:14:05 -06:00
|
|
|
"""
|
|
|
|
|
2009-02-06 13:36:49 -06:00
|
|
|
import re
|
2012-01-17 04:19:00 -06:00
|
|
|
import decimal
|
2012-01-23 08:50:41 -06:00
|
|
|
import base64
|
2014-01-09 04:14:56 -06:00
|
|
|
import datetime
|
2015-09-14 06:22:38 -05:00
|
|
|
from six.moves.xmlrpc_client import MAXINT, MININT
|
2015-07-31 03:15:01 -05:00
|
|
|
|
2015-08-10 11:29:33 -05:00
|
|
|
import six
|
2015-07-31 03:15:01 -05:00
|
|
|
|
|
|
|
from ipalib.text import _ as ugettext
|
2015-12-16 09:06:03 -06:00
|
|
|
from ipalib.base import check_name
|
|
|
|
from ipalib.plugable import ReadOnly, lock
|
2015-07-31 03:15:01 -05:00
|
|
|
from ipalib.errors import ConversionError, RequirementError, ValidationError
|
|
|
|
from ipalib.errors import PasswordMismatch, Base64DecodeError
|
|
|
|
from ipalib.constants import TYPE_ERROR, CALLABLE_ERROR, LDAP_GENERALIZED_TIME_FORMAT
|
|
|
|
from ipalib.text import Gettext, FixMe
|
|
|
|
from ipalib.util import json_serialize, validate_idna_domain
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equalty comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 06:36:35 -05:00
|
|
|
from ipapython.dn import DN
|
2014-03-27 08:36:39 -05:00
|
|
|
from ipapython.dnsutil import DNSName
|
2009-02-06 13:36:49 -06:00
|
|
|
|
2014-02-21 10:38:32 -06:00
|
|
|
def _is_null(value):
|
|
|
|
return not value and value != 0 # NOTE: False == 0
|
|
|
|
|
2015-09-11 06:43:28 -05:00
|
|
|
# Python 3 removed the ``unicode`` builtin; alias it to ``str`` so the
# rest of this module (e.g. the 'option_group' kwarg declaration) can
# refer to ``unicode`` uniformly on either Python version.
if six.PY3:
    unicode = str
|
|
|
|
|
2015-09-17 10:56:45 -05:00
|
|
|
|
2008-12-11 21:30:59 -06:00
|
|
|
class DefaultFrom(ReadOnly):
    """
    Derive a default value from other supplied values.

    For example, say you wanted to create a default for the user's login from
    the user's first and last names. It could be implemented like this:

    >>> login = DefaultFrom(lambda first, last: first[0] + last)
    >>> login(first='John', last='Doe')
    'JDoe'

    If you do not explicitly provide keys when you create a `DefaultFrom`
    instance, the keys are implicitly derived from your callback by
    inspecting ``callback.__code__.co_varnames``. The keys are available
    through the ``DefaultFrom.keys`` instance attribute, like this:

    >>> login.keys
    ('first', 'last')

    The callback is available through the ``DefaultFrom.callback`` instance
    attribute, like this:

    >>> login.callback  # doctest:+ELLIPSIS
    <function <lambda> at 0x...>
    >>> login.callback.__code__.co_varnames  # The keys
    ('first', 'last')

    The keys can be explicitly provided as optional positional arguments after
    the callback. For example, this is equivalent to the ``login`` instance
    above:

    >>> login2 = DefaultFrom(lambda a, b: a[0] + b, 'first', 'last')
    >>> login2.keys
    ('first', 'last')
    >>> login2.callback.__code__.co_varnames  # Not the keys
    ('a', 'b')
    >>> login2(first='John', last='Doe')
    'JDoe'

    If any keys are missing when calling your `DefaultFrom` instance, your
    callback is not called and ``None`` is returned. For example:

    >>> login(first='John', lastname='Doe') is None
    True
    >>> login() is None
    True

    Any additional keys are simply ignored, like this:

    >>> login(last='Doe', first='John', middle='Whatever')
    'JDoe'

    As above, because `DefaultFrom.__call__` takes only pure keyword
    arguments, they can be supplied in any order.

    Of course, the callback need not be a ``lambda`` expression. This third
    example is equivalent to both the ``login`` and ``login2`` instances
    above:

    >>> def get_login(first, last):
    ...     return first[0] + last
    ...
    >>> login3 = DefaultFrom(get_login)
    >>> login3.keys
    ('first', 'last')
    >>> login3.callback.__code__.co_varnames
    ('first', 'last')
    >>> login3(first='John', last='Doe')
    'JDoe'
    """

    def __init__(self, callback, *keys):
        """
        :param callback: The callable to call when all keys are present.
        :param keys: Optional keys used for source values.
        """
        if not callable(callback):
            raise TypeError(
                CALLABLE_ERROR % ('callback', callback, type(callback))
            )
        self.callback = callback
        if len(keys) == 0:
            # No explicit keys: introspect them from the callback's
            # positional argument names.
            fc = callback.__code__
            # 0x0c == CO_VARARGS | CO_VARKEYWORDS; a *args/**kwargs
            # callback has no introspectable key names, so reject it.
            if fc.co_flags & 0x0c:
                raise ValueError("callback: variable-length argument list not allowed")
            self.keys = fc.co_varnames[:fc.co_argcount]
        else:
            self.keys = keys
        for key in self.keys:
            if type(key) is not str:
                raise TypeError(
                    TYPE_ERROR % ('keys', str, key, type(key))
                )
        lock(self)

    def __repr__(self):
        # Expression-style repr built from the callback name and keys.
        args = (self.callback.__name__,) + tuple(repr(k) for k in self.keys)
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(args)
        )

    def __call__(self, **kw):
        """
        Call the callback if all keys are present.

        If all keys are present, the callback is called and its return value is
        returned. If any keys are missing, ``None`` is returned.

        :param kw: The keyword arguments.
        """
        vals = tuple(kw.get(k, None) for k in self.keys)
        if None in vals:
            return
        try:
            return self.callback(*vals)
        except Exception:
            # Any failure in the callback simply means no default can be
            # derived; fall through and implicitly return None.
            pass

    def __json__(self):
        # JSON serialization exposes just the source-value keys.
        return self.keys
|
|
|
|
|
2008-12-11 21:30:59 -06:00
|
|
|
|
2008-12-18 02:45:13 -06:00
|
|
|
def parse_param_spec(spec):
    """
    Parse shorthand ``spec`` into to ``(name, kw)``.

    The last character of ``spec`` selects whether the parameter is
    required and whether it is multivalue, according to this table:

    ====== ===== ======== ==========
    Spec   Name  Required Multivalue
    ====== ===== ======== ==========
    'var'  'var' True     False
    'var?' 'var' False    False
    'var*' 'var' False    True
    'var+' 'var' True     True
    ====== ===== ======== ==========

    For example,

    >>> parse_param_spec('login')
    ('login', {'required': True, 'multivalue': False})
    >>> parse_param_spec('gecos?')
    ('gecos', {'required': False, 'multivalue': False})
    >>> parse_param_spec('telephone_numbers*')
    ('telephone_numbers', {'required': False, 'multivalue': True})
    >>> parse_param_spec('group+')
    ('group', {'required': True, 'multivalue': True})

    :param spec: A spec string.
    """
    if type(spec) is not str:
        raise TypeError(
            TYPE_ERROR % ('spec', str, spec, type(spec))
        )
    suffix_flags = {
        '?': dict(required=False, multivalue=False),
        '*': dict(required=False, multivalue=True),
        '+': dict(required=True, multivalue=True),
    }
    suffix = spec[-1]
    kw = suffix_flags.get(suffix)
    if kw is not None:
        # Strip the one-character suffix off the name:
        return (spec[:-1], kw)
    # No recognized suffix: a plain required, single-valued parameter.
    return (spec, dict(required=True, multivalue=False))
|
|
|
|
|
|
|
|
|
2009-01-13 03:17:16 -06:00
|
|
|
# Every message string passed through the module-local _() marker below
# is remembered here, so the strings can be collected later (e.g. for
# translation extraction).
__messages = set()


def _(message):
    """Record ``message`` in ``__messages`` and return it unchanged."""
    __messages.add(message)
    return message
|
|
|
|
|
|
|
|
|
2008-12-10 22:14:05 -06:00
|
|
|
class Param(ReadOnly):
|
|
|
|
"""
|
2008-12-11 23:39:50 -06:00
|
|
|
Base class for all parameters.
|
2011-11-14 10:03:44 -06:00
|
|
|
|
|
|
|
Param attributes:
|
|
|
|
=================
|
|
|
|
The behavior of Param class and subclasses can be controlled using the
|
|
|
|
following set of attributes:
|
|
|
|
|
|
|
|
- cli_name: option name in CLI
|
|
|
|
- cli_short_name: one character version of cli_name
|
2014-03-18 04:11:19 -05:00
|
|
|
- deprecated_cli_aliases: deprecated CLI aliases
|
2011-11-14 10:03:44 -06:00
|
|
|
    - label: very short description of the parameter. This value is used
      when the Command output is printed to CLI or in a Command help
|
|
|
- doc: parameter long description used in help
|
|
|
|
- required: the parameter is marked as required for given Command
|
|
|
|
- multivalue: indicates if the attribute is multivalued
|
|
|
|
- primary_key: Command's parameter primary key is used for unique
|
|
|
|
identification of an LDAP object and for sorting
|
|
|
|
- normalizer: a custom function for Param value normalization
|
|
|
|
- default_from: a custom function for generating default values of
|
|
|
|
parameter instance
|
|
|
|
- autofill: by default, only `required` parameters get a default value
|
2012-03-15 03:32:37 -05:00
|
|
|
from the default_from function. When autofill is enabled, optional
|
|
|
|
attributes get the default value filled too
|
2011-11-14 10:03:44 -06:00
|
|
|
- query: this attribute is controlled by framework. When the `query`
|
|
|
|
is enabled, framework assumes that the value is only queried and not
|
|
|
|
inserted in the LDAP. Validation is then relaxed - custom
|
|
|
|
parameter validators are skipped and only basic class validators are
|
|
|
|
executed to check the parameter value
|
|
|
|
- attribute: this attribute is controlled by framework and enabled for
|
|
|
|
all LDAP objects parameters (unless parameter has "virtual_attribute"
|
|
|
|
flag). All parameters with enabled `attribute` are being encoded and
|
|
|
|
placed to an entry passed to LDAP Create/Update calls
|
|
|
|
- include: a list of contexts where this parameter should be included.
|
|
|
|
`Param.use_in_context()` provides further information.
|
|
|
|
- exclude: a list of contexts where this parameter should be excluded.
|
|
|
|
`Param.use_in_context()` provides further information.
|
|
|
|
- flags: there are several flags that can be used to further tune the
|
|
|
|
parameter behavior:
|
|
|
|
* no_display (Output parameters only): do not display the parameter
|
|
|
|
* no_create: do not include the parameter for crud.Create based
|
|
|
|
commands
|
2013-10-01 12:55:22 -05:00
|
|
|
* no_update: do not include the parameter for crud.Update based
|
|
|
|
commands
|
|
|
|
* no_search: do not include the parameter for crud.Search based
|
2011-11-14 10:03:44 -06:00
|
|
|
commands
|
2012-05-21 04:03:21 -05:00
|
|
|
* no_option: this attribute is not displayed in the CLI, usually
|
|
|
|
because there's a better way of setting it (for example, a
|
|
|
|
separate command)
|
2011-11-14 10:03:44 -06:00
|
|
|
* virtual_attribute: the parameter is not stored physically in the
|
|
|
|
LDAP and thus attribute `attribute` is not enabled
|
|
|
|
* suppress_empty (Output parameters only): do not display parameter
|
|
|
|
value when empty
|
|
|
|
* ask_create: CLI asks for parameter value even when the parameter
|
|
|
|
is not `required`. Applied for all crud.Create based commands
|
|
|
|
* ask_update: CLI asks for parameter value even when the parameter
|
|
|
|
is not `required`. Applied for all crud.Update based commands
|
|
|
|
* req_update: The parameter is `required` in all crud.Update based
|
|
|
|
commands
|
2012-04-13 08:13:50 -05:00
|
|
|
* nonempty: This is an internal flag; a required attribute should
|
|
|
|
be used instead of it.
|
|
|
|
The value of this parameter must not be empty, but it may
|
|
|
|
not be given at all. All crud.Update commands automatically
|
|
|
|
convert required parameters to `nonempty` ones, so the value
|
|
|
|
can be unspecified (unchanged) but cannot be deleted.
|
2013-10-01 12:57:24 -05:00
|
|
|
* optional_create: do not require the parameter for crud.Create
|
|
|
|
based commands
|
2013-09-13 09:08:22 -05:00
|
|
|
* allow_mod_for_managed_permission: permission-mod allows changing
|
|
|
|
the parameter for managed permissions
|
Only split CSV in the client, quote instead of escaping
Splitting on commas is not an idempotent operation:
'a,b\,c' -> ('a', 'b,c') -> ('a', 'b', 'c')
That means we can't do it when the call is forwarded, so this is only
done on the CLI. The UI already sends values as a tuple.
Replace escaping in the csv parser with quoting. Quoted strings can have
embedded commas instead of having to escape them. This prevents the csv
parser from eating all escape characters.
Also, document Param's csv arguments, and update tests.
https://fedorahosted.org/freeipa/ticket/2417
https://fedorahosted.org/freeipa/ticket/2227
2012-02-23 06:29:47 -06:00
|
|
|
- hint: this attribute is currently not used
|
2011-11-14 10:03:44 -06:00
|
|
|
- alwaysask: when enabled, CLI asks for parameter value even when the
|
|
|
|
parameter is not `required`
|
|
|
|
- sortorder: used to sort a list of parameters for Command. See
|
|
|
|
`Command.finalize()` for further information
|
2013-02-14 10:49:47 -06:00
|
|
|
- csv: this multivalue attribute used to be given in CSV format in CLI
|
2008-12-10 22:14:05 -06:00
|
|
|
"""
|
|
|
|
|
2008-12-12 04:38:02 -06:00
|
|
|
    # This is a dummy type so that most of the functionality of Param can be
    # unit tested directly without always creating a subclass; however, a real
    # (direct) subclass must *always* override this class attribute.
    # If multiple types are permitted, set `type` to the canonical type and
    # `allowed_types` to a tuple of all allowed types.
    type = type(None)  # Ouch, this won't be very useful in the real world!

    # Subclasses should override this with something more specific:
    type_error = _('incorrect type')

    # _convert_scalar operates only on scalar values
    scalar_error = _('Only one value is allowed')

    # NOTE(review): flag marking password parameters; assumed to be set
    # True by password subclasses -- confirm against the rest of module.
    password = False

    # (key, type, default) triples declaring every kwarg accepted by
    # Param.__init__(), the type each value must have, and its default.
    # A type given as a tuple means "any of these types"; a type of
    # `callable` means the value must merely be callable.
    kwargs = (
        ('cli_name', str, None),
        ('cli_short_name', str, None),
        ('deprecated_cli_aliases', frozenset, frozenset()),
        ('label', (six.string_types, Gettext), None),
        ('doc', (six.string_types, Gettext), None),
        ('required', bool, True),
        ('multivalue', bool, False),
        ('primary_key', bool, False),
        ('normalizer', callable, None),
        ('default_from', DefaultFrom, None),
        ('autofill', bool, False),
        ('query', bool, False),
        ('attribute', bool, False),
        ('include', frozenset, None),
        ('exclude', frozenset, None),
        ('flags', frozenset, frozenset()),
        ('hint', (str, Gettext), None),
        ('alwaysask', bool, False),
        ('sortorder', int, 2),  # see finalize()
        ('csv', bool, False),
        ('option_group', unicode, None),

        # The 'default' kwarg gets appended in Param.__init__():
        # ('default', self.type, None),
    )
|
|
|
|
|
2013-09-30 11:45:37 -05:00
|
|
|
    @property
    def allowed_types(self):
        """The allowed datatypes for this Param"""
        # The base implementation permits only the canonical ``type``;
        # subclasses that accept several Python types override this to
        # return a tuple of all of them (see the comment on the ``type``
        # class attribute).
        return (self.type,)
|
|
|
|
|
2008-12-12 05:48:25 -06:00
|
|
|
    def __init__(self, name, *rules, **kw):
        """
        Initialize this parameter from a param spec and keyword options.

        :param name: The parameter name, optionally carrying a trailing
            spec suffix ('?', '*' or '+') understood by
            `parse_param_spec()`.
        :param rules: Optional custom validation rules; each must be
            callable. They run in addition to the class rules.
        :param kw: Keyword options; the accepted keys, their required
            types and their defaults are declared in the ``kwargs``
            class attribute.
        """
        # We keep these values to use in __repr__():
        self.param_spec = name
        self.__kw = dict(kw)

        # Merge in kw from parse_param_spec():
        # explicit 'required'/'multivalue' kwargs win over the suffix.
        (name, kw_from_spec) = parse_param_spec(name)
        if not 'required' in kw:
            kw['required'] = kw_from_spec['required']
        if not 'multivalue' in kw:
            kw['multivalue'] = kw_from_spec['multivalue']
        self.name = check_name(name)
        self.nice = '%s(%r)' % (self.__class__.__name__, self.param_spec)

        # Add 'default' to self.kwargs and make sure no unknown kw were given.
        # Note: ``+=`` on the class-level tuple binds a new per-instance
        # ``kwargs`` attribute; the class attribute itself is untouched.
        assert all(type(t) is type for t in self.allowed_types)
        if kw.get('multivalue', True):
            # A multivalue default is a tuple of values:
            self.kwargs += (('default', tuple, None),)
        else:
            # A scalar default must match this class's type:
            self.kwargs += (('default', self.type, None),)
        if not set(t[0] for t in self.kwargs).issuperset(self.__kw):
            extra = set(kw) - set(t[0] for t in self.kwargs)
            raise TypeError(
                '%s: takes no such kwargs: %s' % (self.nice,
                    ', '.join(repr(k) for k in sorted(extra))
                )
            )

        # Merge in default for 'cli_name', label, doc if not given:
        if kw.get('cli_name') is None:
            kw['cli_name'] = self.name

        # FixMe is a place-holder marking a missing translatable string:
        if kw.get('label') is None:
            kw['label'] = FixMe(self.name)

        if kw.get('doc') is None:
            kw['doc'] = kw['label']

        # Wrap 'default_from' in a DefaultFrom if not already:
        df = kw.get('default_from', None)
        if callable(df) and not isinstance(df, DefaultFrom):
            kw['default_from'] = DefaultFrom(df)

        # We keep this copy with merged values also to use when cloning:
        self.__clonekw = kw

        # Perform type validation on kw, add in class rules:
        class_rules = []
        for (key, kind, default) in self.kwargs:
            value = kw.get(key, default)
            if value is not None:
                # frozenset kwargs also accept list/tuple/set literals,
                # or a single str (coerced to a one-element frozenset):
                if kind is frozenset:
                    if type(value) in (list, tuple, set):
                        value = frozenset(value)
                    elif type(value) is str:
                        value = frozenset([value])
                if (
                    type(kind) is type and not isinstance(value, kind)
                    or
                    type(kind) is tuple and not isinstance(value, kind)
                ):
                    raise TypeError(
                        TYPE_ERROR % (key, kind, value, type(value))
                    )
                elif kind is callable and not callable(value):
                    raise TypeError(
                        CALLABLE_ERROR % (key, value, type(value))
                    )
            # Refuse kwargs that would shadow an existing attribute
            # (e.g. a method or class attribute):
            if hasattr(self, key):
                raise ValueError('kwarg %r conflicts with attribute on %s' % (
                    key, self.__class__.__name__)
                )
            setattr(self, key, value)
            # A '_rule_<key>' method on the class becomes an active
            # class rule whenever the corresponding kwarg is set:
            rule_name = '_rule_%s' % key
            if value is not None and hasattr(self, rule_name):
                class_rules.append(getattr(self, rule_name))
        check_name(self.cli_name)

        # Check that only 'include' or 'exclude' was provided:
        if None not in (self.include, self.exclude):
            raise ValueError(
                '%s: cannot have both %s=%r and %s=%r' % (
                    self.nice,
                    'include', self.include,
                    'exclude', self.exclude,
                )
            )

        # Check that if csv is set, multivalue is set too
        if self.csv and not self.multivalue:
            raise ValueError('%s: cannot have csv without multivalue' % self.nice)

        # Check that all the rules are callable
        self.class_rules = tuple(class_rules)
        self.rules = rules
        if self.query:
            # by definition a query enforces no class or parameter rules
            self.all_rules = ()
        else:
            self.all_rules = self.class_rules + self.rules
        for rule in self.all_rules:
            if not callable(rule):
                raise TypeError(
                    '%s: rules must be callable; got %r' % (self.nice, rule)
                )

        # Check that cli_short_name is only 1 character long:
        if not (self.cli_short_name is None or len(self.cli_short_name) == 1):
            raise ValueError(
                '%s: cli_short_name can only be a single character: %s' % (
                    self.nice, self.cli_short_name)
            )

        # And we're done, so lock the instance read-only.
        lock(self)
|
|
|
|
|
2008-12-18 12:21:12 -06:00
|
|
|
def __repr__(self):
    """
    Return an expression that could construct this `Param` instance.
    """
    pieces = ', '.join(self.__repr_iter())
    return '{0}({1})'.format(self.__class__.__name__, pieces)
|
|
|
|
|
2009-05-13 02:04:35 -05:00
|
|
|
def __repr_iter(self):
    # Yield the pieces of the constructor expression used by __repr__():
    # the positional spec first, then rule names, then sorted kwargs.
    yield repr(self.param_spec)
    for rule in self.rules:
        yield rule.__name__
    for key in sorted(self.__kw):
        raw = self.__kw[key]
        if callable(raw) and hasattr(raw, '__name__'):
            rendered = raw.__name__
        elif isinstance(raw, six.integer_types):
            rendered = str(raw)
        else:
            rendered = repr(raw)
        yield '{0}={1}'.format(key, rendered)
|
2009-05-13 02:04:35 -05:00
|
|
|
|
2009-01-13 21:27:19 -06:00
|
|
|
def __call__(self, value, **kw):
    """
    One stop shopping: normalize, convert and validate ``value``.

    A null value is replaced by the (possibly dynamic) default built from
    ``kw``; any other value is normalized and converted before validation.
    """
    if _is_null(value):
        value = self.get_default(**kw)
    else:
        value = self.convert(self.normalize(value))
    supplied = self.name in kw
    if hasattr(self, 'env'):
        # pylint: disable=E1101
        self.validate(value, self.env.context, supplied=supplied)
    else:
        self.validate(value, supplied=supplied)
    return value
|
|
|
|
|
2012-03-16 12:30:59 -05:00
|
|
|
def get_param_name(self):
    """
    Return the right name of an attribute depending on usage.

    Normally errors should use ``cli_name``, our "friendly" name; when
    using the API directly or ``*attr``, the real name is returned
    (``cli_name`` may be unset).
    """
    return self.cli_name or self.name
|
|
|
|
|
2009-10-13 12:28:00 -05:00
|
|
|
def kw(self):
    """
    Iterate through ``(key, value)`` for all kwargs passed to constructor.

    Callable values that carry a ``__name__`` are reduced to that name so
    the output stays serializable.
    """
    for key in sorted(self.__kw):
        val = self.__kw[key]
        if callable(val) and hasattr(val, '__name__'):
            yield (key, val.__name__)
        else:
            yield (key, val)
|
|
|
|
|
2009-05-13 02:04:35 -05:00
|
|
|
def use_in_context(self, env):
    """
    Return ``True`` if this parameter should be used in ``env.context``.

    If a parameter is created with neither the ``include`` nor the
    ``exclude`` kwarg, this method will always return ``True``.  For
    example:

    >>> from ipalib.config import Env
    >>> param = Param('my_param')
    >>> param.use_in_context(Env(context='foo'))
    True
    >>> param.use_in_context(Env(context='bar'))
    True

    If a parameter is created with an ``include`` kwarg, this method will
    only return ``True`` if ``env.context`` is in ``include``.  For example:

    >>> param = Param('my_param', include=['foo', 'whatever'])
    >>> param.include
    frozenset(['foo', 'whatever'])
    >>> param.use_in_context(Env(context='foo'))
    True
    >>> param.use_in_context(Env(context='bar'))
    False

    If a parameter is created with an ``exclude`` kwarg, this method will
    only return ``True`` if ``env.context`` is not in ``exclude``.  For
    example:

    >>> param = Param('my_param', exclude=['foo', 'whatever'])
    >>> param.exclude
    frozenset(['foo', 'whatever'])
    >>> param.use_in_context(Env(context='foo'))
    False
    >>> param.use_in_context(Env(context='bar'))
    True

    Note that the ``include`` and ``exclude`` kwargs are mutually exclusive
    and that at most one can be supplied to `Param.__init__()`.  For
    example:

    >>> param = Param('nope', include=['foo'], exclude=['bar'])
    Traceback (most recent call last):
      ...
    ValueError: Param('nope'): cannot have both include=frozenset(['foo']) and exclude=frozenset(['bar'])

    So that subclasses can add additional logic based on other environment
    variables, the entire `config.Env` instance is passed in rather than
    just the value of ``env.context``.
    """
    context = env.context
    if self.include is not None:
        return context in self.include
    if self.exclude is not None:
        return context not in self.exclude
    return True
|
|
|
|
|
2009-01-22 19:48:21 -06:00
|
|
|
def safe_value(self, value):
    """
    Return a value safe for logging.

    This is used so that sensitive values like passwords don't get logged.
    For example:

    >>> p = Password('my_password')
    >>> p.safe_value(u'This is my password')
    u'********'
    >>> p.safe_value(None) is None
    True

    >>> s = Str('my_str')
    >>> s.safe_value(u'Some arbitrary value')
    u'Some arbitrary value'
    """
    if value is None or not self.password:
        return value
    return u'********'
|
|
|
|
|
2009-01-13 20:49:23 -06:00
|
|
|
def clone(self, **overrides):
    """
    Return a new `Param` instance similar to this one.

    :param overrides: Keyword options replacing those the original
        instance was created with; the name is kept.
    """
    return self.clone_rename(self.name, **overrides)
|
|
|
|
|
|
|
|
def clone_rename(self, name, **overrides):
    """
    Return a new `Param` instance similar to this one, but named differently

    :param name: Name for the new parameter.
    :param overrides: Keyword options replacing those the original
        instance was created with; the class is kept.
    """
    return self.clone_retype(name, self.__class__, **overrides)
|
|
|
|
|
|
|
|
def clone_retype(self, name, klass, **overrides):
    """
    Return a new `Param` instance similar to this one, but of a different
    type.

    :param name: Name for the new parameter.
    :param klass: Class of the new parameter; called with the original
        rules plus the merged keyword options.
    :param overrides: Keyword options replacing those the original
        instance was created with.
    """
    merged = dict(self.__clonekw, **overrides)
    return klass(name, *self.rules, **merged)
|
2009-01-13 20:49:23 -06:00
|
|
|
|
2008-12-10 22:14:05 -06:00
|
|
|
def normalize(self, value):
    """
    Normalize ``value`` using normalizer callback.

    For example:

    >>> param = Param('telephone',
    ...     normalizer=lambda value: value.replace('.', '-')
    ... )
    >>> param.normalize(u'800.123.4567')
    u'800-123-4567'

    If this `Param` instance was created with a normalizer callback and
    ``value`` is a unicode instance, the normalizer callback is called and
    *its* return value is returned.

    On the other hand, if this `Param` instance was *not* created with a
    normalizer callback, if ``value`` is *not* a unicode instance, or if an
    exception is caught when calling the normalizer callback, ``value`` is
    returned unchanged.

    :param value: A proposed value for this parameter.
    """
    if not self.multivalue:
        return self._normalize_scalar(value)
    # Multivalue: wrap a bare scalar, then normalize each item.
    if type(value) not in (tuple, list):
        value = (value,)
    return tuple(self._normalize_scalar(item) for item in value)
|
2008-12-10 22:14:05 -06:00
|
|
|
|
2008-12-18 00:32:58 -06:00
|
|
|
def _normalize_scalar(self, value):
    """
    Normalize a scalar value.

    This method is called once for each value in a multivalue.
    """
    normalizer = self.normalizer
    if normalizer is None:
        return value
    try:
        return normalizer(value)
    except Exception:
        # Best effort: a failing normalizer leaves the value untouched.
        return value
|
|
|
|
|
|
|
|
def convert(self, value):
    """
    Convert ``value`` to the Python type required by this parameter.

    For example:

    >>> scalar = Str('my_scalar')
    >>> scalar.type
    <type 'unicode'>
    >>> scalar.convert(43.2)
    u'43.2'

    (Note that `Str` is a subclass of `Param`.)

    All non-numeric, non-boolean values which evaluate to False will be
    converted to None.  For example:

    >>> scalar.convert(u'') is None  # An empty string
    True
    >>> scalar.convert([]) is None  # An empty list
    True

    Likewise, they will be filtered out of a multivalue parameter.
    For example:

    >>> multi = Str('my_multi', multivalue=True)
    >>> multi.convert([1.5, '', 17, None, u'Hello'])
    (u'1.5', u'17', u'Hello')
    >>> multi.convert([None, u'']) is None  # Filters to an empty list
    True

    Lastly, multivalue parameters will always return a ``tuple`` (assuming
    they don't return ``None`` as in the last example above).  For example:

    >>> multi.convert(42)  # Called with a scalar value
    (u'42',)
    >>> multi.convert([0, 1])  # Called with a list value
    (u'0', u'1')

    Note that how values are converted (and from what types they will be
    converted) completely depends upon how a subclass implements its
    `Param._convert_scalar()` method.  For example, see
    `Str._convert_scalar()`.

    :param value: A proposed value for this parameter.
    """
    if _is_null(value):
        return None
    if not self.multivalue:
        return self._convert_scalar(value)
    # Multivalue: wrap a bare scalar, convert each non-null item, and
    # collapse an all-null input to None.
    if type(value) not in (tuple, list):
        value = (value,)
    converted = tuple(
        self._convert_scalar(item)
        for item in value
        if not _is_null(item)
    )
    return converted if converted else None
|
2008-12-10 22:14:05 -06:00
|
|
|
|
|
|
|
def _convert_scalar(self, value, index=None):
    """
    Convert a single scalar value.

    The base implementation only accepts values already of an allowed
    type; subclasses override this to perform real conversion.

    :param value: The scalar to convert.
    :param index: Unused; kept for interface compatibility.
    :raises ConversionError: if ``value`` is not of an allowed type.
    """
    if type(value) not in self.allowed_types:
        raise ConversionError(name=self.name, error=ugettext(self.type_error))
    return value
|
2008-12-10 22:14:05 -06:00
|
|
|
|
2012-02-16 06:11:56 -06:00
|
|
|
def validate(self, value, context=None, supplied=None):
    """
    Check validity of ``value``.

    :param value: A proposed value for this parameter.
    :param context: The context we are running in.
    :param supplied: True if this parameter was supplied explicitly.
    :raises RequirementError: if a required value is missing.
    :raises TypeError: if a multivalue is not a ``tuple``.
    :raises ValueError: if a multivalue tuple is empty (convert() should
        have collapsed it to ``None``).
    """
    if value is None:
        if self.required or (supplied and 'nonempty' in self.flags):
            # CLI errors report the friendly name.
            name = self.cli_name if context == 'cli' else self.name
            raise RequirementError(name=name)
        return
    if not self.multivalue:
        self._validate_scalar(value)
        return
    if type(value) is not tuple:
        raise TypeError(
            TYPE_ERROR % ('value', tuple, value, type(value))
        )
    if len(value) < 1:
        raise ValueError('value: empty tuple must be converted to None')
    for item in value:
        self._validate_scalar(item)
|
|
|
|
|
|
|
|
def _validate_scalar(self, value, index=None):
    """
    Validate a single scalar value against type and all rules.

    :param value: A value already converted to an allowed type.
    :param index: Unused; kept for interface compatibility with
        multivalue callers.
    :raises TypeError: if ``value`` is not of an allowed type.
    :raises ValidationError: with the first rule error encountered.
    """
    if type(value) not in self.allowed_types:
        raise TypeError(
            TYPE_ERROR % (self.name, self.type, value, type(value))
        )
    # Rules return None on success or a translated error message.
    for rule in self.all_rules:
        error = rule(ugettext, value)
        if error is not None:
            raise ValidationError(name=self.get_param_name(), error=error)
|
2009-01-03 03:35:36 -06:00
|
|
|
|
2009-01-12 23:48:04 -06:00
|
|
|
def get_default(self, **kw):
    """
    Return the static default or construct and return a dynamic default.

    (In these examples, we will use the `Str` and `Bytes` classes, which
    both subclass from `Param`.)

    The *default* static default is ``None``.  For example:

    >>> s = Str('my_str')
    >>> s.default is None
    True
    >>> s.get_default() is None
    True

    However, you can provide your own static default via the ``default``
    keyword argument when you create your `Param` instance.  For example:

    >>> s = Str('my_str', default=u'My Static Default')
    >>> s.default
    u'My Static Default'
    >>> s.get_default()
    u'My Static Default'

    If you need to generate a dynamic default from other supplied parameter
    values, provide a callback via the ``default_from`` keyword argument.
    This callback will be automatically wrapped in a `DefaultFrom` instance
    if it isn't one already (see the `DefaultFrom` class for all the gory
    details).  For example:

    >>> login = Str('login', default=u'my-static-login-default',
    ...     default_from=lambda first, last: (first[0] + last).lower(),
    ... )
    >>> isinstance(login.default_from, DefaultFrom)
    True
    >>> login.default_from.keys
    ('first', 'last')

    Then when all the keys needed by the `DefaultFrom` instance are present,
    the dynamic default is constructed and returned.  For example:

    >>> kw = dict(last=u'Doe', first=u'John')
    >>> login.get_default(**kw)
    u'jdoe'

    Or if any keys are missing, your *static* default is returned.
    For example:

    >>> kw = dict(first=u'John', department=u'Engineering')
    >>> login.get_default(**kw)
    u'my-static-login-default'
    """
    if self.default_from is not None:
        dynamic = self.default_from(**kw)
        if dynamic is not None:
            try:
                return self.convert(self.normalize(dynamic))
            except Exception:
                # A broken dynamic default falls back to the static one.
                pass
    return self.default
|
|
|
|
|
2014-03-12 10:34:22 -05:00
|
|
|
def sort_key(self, value):
    """
    Return the key used when sorting values of this parameter.

    The base implementation returns ``value`` unchanged; subclasses may
    override to impose a different ordering.
    """
    return value
|
|
|
|
|
2010-08-10 15:40:00 -05:00
|
|
|
def __json__(self):
    """
    Return a JSON-serializable dict describing this parameter.

    Attributes whose declared kind is ``callable`` or `DefaultFrom` are
    skipped (they cannot be serialized); attributes that are ``None`` are
    omitted because the lack of their presence is the information itself.
    Frozensets are flattened to lists; everything else goes through
    ``json_serialize()``.
    """
    json_dict = {}
    for (a, k, d) in self.kwargs:
        if k in (callable, DefaultFrom):
            continue
        if isinstance(getattr(self, a), frozenset):
            # list() instead of a comprehension: the original comprehension
            # variable shadowed the unpacked loop variable ``k`` (and
            # leaked over it on Python 2).
            json_dict[a] = list(getattr(self, a, []))
        else:
            val = getattr(self, a, '')
            if val is None:
                # ignore 'not set' because lack of their presence is
                # the information itself
                continue
            json_dict[a] = json_serialize(val)

    json_dict['class'] = self.__class__.__name__
    json_dict['name'] = self.name
    json_dict['type'] = self.type.__name__

    return json_dict
|
|
|
|
|
2008-12-10 22:14:05 -06:00
|
|
|
|
|
|
|
class Bool(Param):
    """
    A parameter for boolean values (stored in the ``bool`` type).

    Conversion recognizes the members of the ``truths`` and ``falsehoods``
    sets below; string input is lowercased first, so e.g. ``u'TRUE'`` and
    ``1`` both convert to ``True``.
    """

    type = bool
    type_error = _('must be True or False')

    # FIXME: This is my quick hack to get some UI stuff working; change
    # these defaults --jderose 2009-08-28
    kwargs = Param.kwargs + (
        # Values _convert_scalar() maps to True / False:
        ('truths', frozenset, frozenset([1, u'1', True, u'true', u'TRUE'])),
        ('falsehoods', frozenset, frozenset([0, u'0', False, u'false', u'FALSE'])),
    )

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value.

        :param value: The value to convert to ``bool``.
        :param index: Unused; kept for interface compatibility.
        :raises ConversionError: if ``value`` is a sequence (never a valid
            scalar) or cannot be matched against ``truths``/``falsehoods``.
        """
        if type(value) in self.allowed_types:
            return value
        if isinstance(value, six.string_types):
            # Case-insensitive match against the truth/falsehood sets.
            value = value.lower()
        if value in self.truths:
            return True
        if value in self.falsehoods:
            return False
        if type(value) in (tuple, list):
            raise ConversionError(name=self.name,
                                  error=ugettext(self.scalar_error))
        raise ConversionError(name=self.name, error=ugettext(self.type_error))
|
2009-10-13 12:28:00 -05:00
|
|
|
|
2009-01-13 21:27:19 -06:00
|
|
|
|
|
|
|
class Flag(Bool):
    """
    A boolean parameter that always gets filled in with a default value.

    This `Bool` subclass forces ``autofill=True`` in `Flag.__init__()`. If no
    default is provided, it also fills in a default value of ``False``.
    Lastly, unlike the `Bool` class, the default must be either ``True`` or
    ``False`` and cannot be ``None``.

    For example:

    >>> flag = Flag('my_flag')
    >>> (flag.autofill, flag.default)
    (True, False)

    To have a default value of ``True``, create your `Flag` instance with
    ``default=True``.  For example:

    >>> flag = Flag('my_flag', default=True)
    >>> (flag.autofill, flag.default)
    (True, True)

    Also note that creating a `Flag` instance with ``autofill=False`` will have
    no effect.  For example:

    >>> flag = Flag('my_flag', autofill=False)
    >>> flag.autofill
    True
    """

    def __init__(self, name, *rules, **kw):
        # Force autofill and a boolean default before handing off to Bool.
        kw['autofill'] = True
        if 'default' not in kw:
            kw['default'] = False
        if type(kw['default']) is not bool:
            default = kw['default']
            raise TypeError(
                TYPE_ERROR % ('default', bool, default, type(default))
            )
        super(Flag, self).__init__(name, *rules, **kw)
|
|
|
|
|
2008-12-10 22:14:05 -06:00
|
|
|
|
2009-01-14 21:36:17 -06:00
|
|
|
class Number(Param):
    """
    Base class for the `Int` and `Decimal` parameters.
    """

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value.

        Strings and numbers are fed through ``self.type`` (set by the
        subclass); sequences are rejected as never-valid scalars.

        :param value: The value to convert.
        :param index: Unused; kept for interface compatibility.
        :raises ConversionError: if ``value`` is of an unsupported type or
            cannot be parsed as a number.
        """
        if type(value) in self.allowed_types:
            return value
        if type(value) in (unicode, float) + six.integer_types:
            try:
                return self.type(value)
            except ValueError:
                pass
        if type(value) in (tuple, list):
            raise ConversionError(name=self.name,
                                  error=ugettext(self.scalar_error))
        raise ConversionError(name=self.name, error=ugettext(self.type_error))
|
2009-01-14 21:36:17 -06:00
|
|
|
|
|
|
|
|
|
|
|
class Int(Number):
    """
    A parameter for integer values (stored in the ``int`` type).
    """

    type = int
    allowed_types = six.integer_types
    type_error = _('must be an integer')

    kwargs = Param.kwargs + (
        ('minvalue', six.integer_types, int(MININT)),
        ('maxvalue', six.integer_types, int(MAXINT)),
    )

    @staticmethod
    def convert_int(value):
        """
        Convert ``value`` to ``int``.

        Accepts ints, floats (truncated) and unicode strings; strings with
        a dot are parsed as floats first, strings with a ``0`` prefix are
        treated as octal on Python 3, anything else uses base auto-detection.

        :raises ValueError: if ``value`` is of another type or unparsable.
        """
        if type(value) in Int.allowed_types:
            return value

        if type(value) is float:
            return int(value)

        if type(value) is unicode:
            if u'.' in value:
                return int(float(value))
            if six.PY3 and re.match('0[0-9]+', value):
                # 0-prefixed octal format
                return int(value, 8)
            return int(value, 0)

        raise ValueError(value)

    def __init__(self, name, *rules, **kw):
        super(Int, self).__init__(name, *rules, **kw)

        # Check the None-ness of the bounds BEFORE comparing them: the
        # original compared first, which raises TypeError on Python 3 when
        # either bound is None.  This now matches Decimal.__init__().
        if (self.minvalue is not None and
                self.maxvalue is not None and
                self.minvalue > self.maxvalue):
            raise ValueError(
                '%s: minvalue > maxvalue (minvalue=%r, maxvalue=%r)' % (
                    self.nice, self.minvalue, self.maxvalue)
            )

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value.

        :raises ConversionError: if ``value`` cannot be parsed as an int.
        """
        try:
            return Int.convert_int(value)
        except ValueError:
            raise ConversionError(name=self.get_param_name(),
                                  error=ugettext(self.type_error))

    def _rule_minvalue(self, _, value):
        """
        Check min constraint.

        Returns a translated error message on violation, ``None`` otherwise.
        """
        assert type(value) in six.integer_types
        if value < self.minvalue:
            return _('must be at least %(minvalue)d') % dict(
                minvalue=self.minvalue,
            )

    def _rule_maxvalue(self, _, value):
        """
        Check max constraint.

        Returns a translated error message on violation, ``None`` otherwise.
        """
        assert type(value) in six.integer_types
        if value > self.maxvalue:
            return _('can be at most %(maxvalue)d') % dict(
                maxvalue=self.maxvalue,
            )
|
2008-12-10 22:14:05 -06:00
|
|
|
|
2011-07-14 02:14:07 -05:00
|
|
|
|
2012-01-17 04:19:00 -06:00
|
|
|
class Decimal(Number):
    """
    A parameter for floating-point values (stored in the ``Decimal`` type).

    Python Decimal type helps overcome problems tied to plain "float" type,
    e.g. problem with representation or value comparison. In order to safely
    transfer the value over RPC libraries, it is being converted to string
    which is then converted back to Decimal number.
    """

    type = decimal.Decimal
    type_error = _('must be a decimal number')

    kwargs = Param.kwargs + (
        ('minvalue', decimal.Decimal, None),
        ('maxvalue', decimal.Decimal, None),
        # round Decimal to given precision
        ('precision', int, None),
        # when False, number is normalized to non-exponential form
        ('exponential', bool, False),
        # set of allowed decimal number classes
        ('numberclass', tuple, ('-Normal', '+Zero', '+Normal')),
    )

    def __init__(self, name, *rules, **kw):
        # Coerce string/float bound and default kwargs to Decimal up front
        # so later comparisons are between Decimals.
        for kwparam in ('minvalue', 'maxvalue', 'default'):
            value = kw.get(kwparam)
            if value is None:
                continue
            if isinstance(value, (six.string_types, float)):
                try:
                    value = decimal.Decimal(value)
                except Exception as e:
                    raise ValueError(
                        '%s: cannot parse kwarg %s: %s' % (
                            name, kwparam, str(e)))
                kw[kwparam] = value

        super(Decimal, self).__init__(name, *rules, **kw)

        if (self.minvalue is not None and
                self.maxvalue is not None and
                self.minvalue > self.maxvalue):
            raise ValueError(
                '%s: minvalue > maxvalue (minvalue=%s, maxvalue=%s)' % (
                    self.nice, self.minvalue, self.maxvalue)
            )

        if self.precision is not None and self.precision < 0:
            raise ValueError('%s: precision must be at least 0' % self.nice)

    def _rule_minvalue(self, _, value):
        """
        Check min constraint.
        """
        assert type(value) is decimal.Decimal
        if value < self.minvalue:
            return _('must be at least %(minvalue)s') % dict(
                minvalue=self.minvalue,
            )

    def _rule_maxvalue(self, _, value):
        """
        Check max constraint.
        """
        assert type(value) is decimal.Decimal
        if value > self.maxvalue:
            return _('can be at most %(maxvalue)s') % dict(
                maxvalue=self.maxvalue,
            )

    def _enforce_numberclass(self, value):
        """
        Reject values whose Decimal number class (e.g. ``'+Infinity'``,
        ``'NaN'``) is not in ``self.numberclass``.

        :raises ValidationError: for a disallowed number class.
        """
        numberclass = value.number_class()
        if numberclass not in self.numberclass:
            raise ValidationError(name=self.get_param_name(),
                    error=_("number class '%(cls)s' is not included in a list "
                            "of allowed number classes: %(allowed)s") \
                            % dict(cls=numberclass,
                                   allowed=u', '.join(self.numberclass))
                    )

    def _enforce_precision(self, value):
        """
        Quantize ``value`` to ``self.precision`` decimal places, if set.

        :raises ConversionError: if quantization fails.
        """
        assert type(value) is decimal.Decimal
        if self.precision is not None:
            quantize_exp = decimal.Decimal(10) ** -self.precision
            try:
                value = value.quantize(quantize_exp)
            except decimal.DecimalException as e:
                raise ConversionError(name=self.get_param_name(),
                                      error=unicode(e))
        return value

    def _remove_exponent(self, value):
        """
        Normalize ``value`` to non-exponential form unless ``exponential``
        was requested.

        :raises ConversionError: if normalization fails.
        """
        assert type(value) is decimal.Decimal

        if not self.exponential:
            try:
                # adopted from http://docs.python.org/library/decimal.html
                value = value.quantize(decimal.Decimal(1)) \
                        if value == value.to_integral() \
                        else value.normalize()
            except decimal.DecimalException as e:
                raise ConversionError(name=self.get_param_name(),
                                      error=unicode(e))

        return value

    def _test_and_normalize(self, value):
        """
        This method is run in conversion and normalization methods to test
        that the Decimal number conforms to Parameter boundaries and then
        normalizes the value.
        """
        self._enforce_numberclass(value)
        value = self._remove_exponent(value)
        value = self._enforce_precision(value)
        return value

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value to a checked, normalized Decimal.
        """
        if isinstance(value, (six.string_types, float)):
            try:
                value = decimal.Decimal(value)
            except decimal.DecimalException as e:
                raise ConversionError(name=self.get_param_name(),
                                      error=unicode(e))

        if isinstance(value, decimal.Decimal):
            return self._test_and_normalize(value)

        return super(Decimal, self)._convert_scalar(value)

    def _normalize_scalar(self, value):
        """
        Normalize a Decimal in place; defer anything else to the base class.
        """
        if isinstance(value, decimal.Decimal):
            return self._test_and_normalize(value)

        return super(Decimal, self)._normalize_scalar(value)
|
2008-12-10 22:14:05 -06:00
|
|
|
|
2009-01-14 11:58:05 -06:00
|
|
|
class Data(Param):
    """
    Base class for the `Bytes` and `Str` parameters.

    Previously `Str` was a subclass of `Bytes`. Now the common functionality
    has been split into this base class so that ``isinstance(foo, Bytes)``
    won't be ``True`` when ``foo`` is actually an `Str` instance (which is
    confusing).
    """

    kwargs = Param.kwargs + (
        ('minlength', int, None),
        ('maxlength', int, None),
        # length is mutually exclusive with minlength/maxlength (see below)
        ('length', int, None),
        ('pattern_errmsg', (six.string_types,), None),
    )

    # Compiled pattern regex and its error message; set by subclasses.
    re = None
    re_errmsg = None

    def __init__(self, name, *rules, **kw):
        super(Data, self).__init__(name, *rules, **kw)

        # Sanity-check the length constraints now so a misconfigured
        # parameter fails at definition time, not at validation time.
        if not (
            self.length is None or
            (self.minlength is None and self.maxlength is None)
        ):
            raise ValueError(
                '%s: cannot mix length with minlength or maxlength' % self.nice
            )

        if self.minlength is not None and self.minlength < 1:
            raise ValueError(
                '%s: minlength must be >= 1; got %r' % (self.nice, self.minlength)
            )

        if self.maxlength is not None and self.maxlength < 1:
            raise ValueError(
                '%s: maxlength must be >= 1; got %r' % (self.nice, self.maxlength)
            )

        if None not in (self.minlength, self.maxlength):
            if self.minlength > self.maxlength:
                raise ValueError(
                    '%s: minlength > maxlength (minlength=%r, maxlength=%r)' % (
                        self.nice, self.minlength, self.maxlength)
                )
            elif self.minlength == self.maxlength:
                raise ValueError(
                    '%s: minlength == maxlength; use length=%d instead' % (
                        self.nice, self.minlength)
                )

    def _rule_pattern(self, _, value):
        """
        Check pattern (regex) constraint.

        Returns a translated error message on violation (preferring the
        user-supplied ``pattern_errmsg``), ``None`` otherwise.
        """
        assert type(value) in self.allowed_types
        if self.re.match(value) is None:
            if self.re_errmsg:
                return self.re_errmsg % dict(pattern=self.pattern,)
            else:
                return _('must match pattern "%(pattern)s"') % dict(
                    pattern=self.pattern,
                )
|
2009-02-06 13:36:49 -06:00
|
|
|
|
2009-01-14 11:58:05 -06:00
|
|
|
|
|
|
|
class Bytes(Data):
    """
    A parameter for binary data (stored in the ``str`` type).

    This class is named *Bytes* instead of *Str* so it's aligned with the
    Python v3 ``(str, unicode) => (bytes, str)`` clean-up. See:

        http://docs.python.org/3.0/whatsnew/3.0.html

    Also see the `Str` parameter.
    """

    type = bytes
    type_error = _('must be binary data')

    # For binary data the regex pattern is itself given as bytes.
    kwargs = Data.kwargs + (
        ('pattern', (bytes,), None),
    )

    def __init__(self, name, *rules, **kw):
        """Compile the optional ``pattern`` kwarg before generic setup."""
        pattern = kw.get('pattern', None)
        self.re = re.compile(pattern) if pattern is not None else None
        self.re_errmsg = kw.get('pattern_errmsg', None)
        super(Bytes, self).__init__(name, *rules, **kw)

    def _rule_minlength(self, _, value):
        """
        Check minlength constraint.
        """
        assert type(value) is bytes
        if len(value) >= self.minlength:
            return None
        return _('must be at least %(minlength)d bytes') % dict(
            minlength=self.minlength,
        )

    def _rule_maxlength(self, _, value):
        """
        Check maxlength constraint.
        """
        assert type(value) is bytes
        if len(value) <= self.maxlength:
            return None
        return _('can be at most %(maxlength)d bytes') % dict(
            maxlength=self.maxlength,
        )

    def _rule_length(self, _, value):
        """
        Check length constraint.
        """
        assert type(value) is bytes
        if len(value) == self.length:
            return None
        return _('must be exactly %(length)d bytes') % dict(
            length=self.length,
        )

    def _convert_scalar(self, value, index=None):
        """Decode base64 text into bytes before the generic conversion."""
        if isinstance(value, unicode):
            try:
                value = base64.b64decode(value)
            except (TypeError, ValueError) as e:
                raise Base64DecodeError(reason=str(e))
        return super(Bytes, self)._convert_scalar(value)
|
2012-01-23 08:50:41 -06:00
|
|
|
|
2008-12-12 05:48:25 -06:00
|
|
|
|
2009-01-14 11:58:05 -06:00
|
|
|
class Str(Data):
    """
    A parameter for Unicode text (stored in the ``unicode`` type).

    This class is named *Str* instead of *Unicode* so it's aligned with the
    Python v3 ``(str, unicode) => (bytes, str)`` clean-up. See:

        http://docs.python.org/3.0/whatsnew/3.0.html

    Also see the `Bytes` parameter.
    """

    kwargs = Data.kwargs + (
        ('pattern', (six.string_types,), None),
        # When True, leading/trailing whitespace is rejected during
        # validation (see _rule_noextrawhitespace below).
        ('noextrawhitespace', bool, True),
    )

    type = unicode
    type_error = _('must be Unicode text')

    def __init__(self, name, *rules, **kw):
        # Compile the optional regex pattern up front; re.UNICODE makes
        # character classes such as \w match non-ASCII letters too.
        if kw.get('pattern', None) is None:
            self.re = None
        else:
            self.re = re.compile(kw['pattern'], re.UNICODE)
        self.re_errmsg = kw.get('pattern_errmsg', None)
        super(Str, self).__init__(name, *rules, **kw)

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value.

        Numbers are converted to their text representation; note the exact
        type() comparison, so bool (a subclass of int) is deliberately not
        accepted here and falls through to the type error below.
        """
        if type(value) in self.allowed_types:
            return value
        if type(value) in (float, decimal.Decimal) + six.integer_types:
            return self.type(value)
        # A sequence where a scalar was expected is a distinct error.
        if type(value) in (tuple, list):
            raise ConversionError(name=self.name,
                                  error=ugettext(self.scalar_error))
        raise ConversionError(name=self.name, error=ugettext(self.type_error))

    def _rule_noextrawhitespace(self, _, value):
        """
        Do not allow leading/trailing spaces.
        """
        assert type(value) is unicode
        if self.noextrawhitespace is False:
            return
        if len(value) != len(value.strip()):
            return _('Leading and trailing spaces are not allowed')

    def _rule_minlength(self, _, value):
        """
        Check minlength constraint.
        """
        assert type(value) is unicode
        if len(value) < self.minlength:
            return _('must be at least %(minlength)d characters') % dict(
                minlength=self.minlength,
            )

    def _rule_maxlength(self, _, value):
        """
        Check maxlength constraint.
        """
        assert type(value) is unicode
        if len(value) > self.maxlength:
            return _('can be at most %(maxlength)d characters') % dict(
                maxlength=self.maxlength,
            )

    def _rule_length(self, _, value):
        """
        Check length constraint.
        """
        assert type(value) is unicode
        if len(value) != self.length:
            return _('must be exactly %(length)d characters') % dict(
                length=self.length,
            )

    def sort_key(self, value):
        # Sort values case-insensitively.
        return value.lower()
|
2009-01-13 01:27:06 -06:00
|
|
|
|
2010-12-06 14:09:03 -06:00
|
|
|
class IA5Str(Str):
    """
    An IA5String per RFC 4517.

    IA5 is the 7-bit character repertoire, so any character whose code
    point is above 127 is rejected during conversion.
    """

    def __init__(self, name, *rules, **kw):
        super(IA5Str, self).__init__(name, *rules, **kw)

    def _convert_scalar(self, value, index=None):
        """Reject non-7-bit characters, then convert as a regular Str."""
        if isinstance(value, six.string_types):
            # Find the first character outside the IA5 (ASCII) range.
            bad = next((c for c in value if ord(c) > 127), None)
            if bad is not None:
                raise ConversionError(
                    name=self.get_param_name(),
                    error=_('The character %(char)r is not allowed.') %
                        dict(char=bad,)
                )
        return super(IA5Str, self)._convert_scalar(value)
|
2010-12-06 14:09:03 -06:00
|
|
|
|
|
|
|
|
2009-01-14 23:19:31 -06:00
|
|
|
class Password(Str):
    """
    A parameter for passwords (stored in the ``unicode`` type).

    A value may arrive as a (password, confirmation) pair; the pair is
    collapsed to a single value once both entries agree.
    """

    # Marks this parameter as a password so frontends can mask input.
    password = True

    kwargs = Str.kwargs + (
        ('confirm', bool, True),
    )

    def _convert_scalar(self, value, index=None):
        """Collapse a (password, confirmation) pair, then convert as Str."""
        if isinstance(value, (tuple, list)) and len(value) == 2:
            first, second = value
            if first != second:
                raise PasswordMismatch(name=self.name)
            value = first
        return super(Password, self)._convert_scalar(value)
|
2009-10-16 03:22:39 -05:00
|
|
|
|
2009-01-14 23:19:31 -06:00
|
|
|
|
2009-01-18 16:55:56 -06:00
|
|
|
class Enum(Param):
    """
    Base class for parameters with enumerable values.
    """

    kwargs = Param.kwargs + (
        ('values', tuple, tuple()),
    )

    def __init__(self, name, *rules, **kw):
        """Validate that ``values`` is non-empty and correctly typed."""
        super(Enum, self).__init__(name, *rules, **kw)
        # Every enumerated value must already have this parameter's type.
        for (i, v) in enumerate(self.values):
            if type(v) in self.allowed_types:
                continue
            n = '%s values[%d]' % (self.nice, i)
            raise TypeError(
                TYPE_ERROR % (n, self.type, v, type(v))
            )

        # An enum with nothing to choose from is a programming error.
        if not self.values:
            raise ValueError(
                '%s: list of values must not be empty' % self.nice)

    def _rule_values(self, _, value, **kw):
        """Return an error message unless *value* is one of ``values``."""
        if value in self.values:
            return None
        if len(self.values) == 1:
            return _("must be '%(value)s'") % dict(value=self.values[0])
        choices = u', '.join("'%s'" % v for v in self.values)
        return _('must be one of %(values)s') % dict(values=choices)
|
2009-01-18 16:55:56 -06:00
|
|
|
|
|
|
|
class BytesEnum(Enum):
    """
    Enumerable for binary data (stored in the ``str`` type).
    """

    # NOTE(review): the docstring says binary data, yet ``type`` is set to
    # ``unicode`` rather than ``bytes`` -- this looks inconsistent; confirm
    # whether any caller relies on the current behavior before changing it.
    type = unicode
|
|
|
|
|
|
|
|
|
|
|
|
class StrEnum(Enum):
    """
    Enumerable for Unicode text (stored in the ``unicode`` type).

    For example:

    >>> enum = StrEnum('my_enum', values=(u'One', u'Two', u'Three'))
    >>> enum.validate(u'Two', 'cli') is None
    True
    >>> enum.validate(u'Four', 'cli')
    Traceback (most recent call last):
      ...
    ValidationError: invalid 'my_enum': must be one of 'One', 'Two', 'Three'
    """

    type = unicode
|
|
|
|
|
|
|
|
|
2013-10-09 03:16:46 -05:00
|
|
|
class IntEnum(Enum):
    """
    Enumerable for integer data (stored in the ``int`` type).
    """

    type = int
    # Accept every integer type the platform knows about (int and, on
    # Python 2, long -- per the six compatibility layer).
    allowed_types = six.integer_types
    type_error = Int.type_error

    def _convert_scalar(self, value, index=None):
        """
        Convert a single scalar value via the shared Int conversion.
        """
        try:
            converted = Int.convert_int(value)
        except ValueError:
            raise ConversionError(name=self.get_param_name(),
                                  error=ugettext(self.type_error))
        return converted
|
|
|
|
|
|
|
|
|
2011-11-21 09:50:27 -06:00
|
|
|
class Any(Param):
    """
    A parameter capable of holding values of any type. For internal use only.
    """

    type = object

    def _convert_scalar(self, value, index=None):
        # No conversion: any value is accepted as-is.
        return value

    def _validate_scalar(self, value, index=None):
        """Run every rule; raise on the first one that reports an error."""
        for check in self.all_rules:
            message = check(ugettext, value)
            if message is None:
                continue
            raise ValidationError(name=self.name, error=message)
|
2009-03-18 14:44:53 -05:00
|
|
|
|
2009-09-30 09:24:25 -05:00
|
|
|
|
2009-11-05 09:15:47 -06:00
|
|
|
class File(Str):
    """
    File parameter type.

    Accepts file names and loads their content into the parameter value.
    """

    # Built on Data.kwargs rather than Str.kwargs, so the ``pattern``
    # kwarg that Str adds is deliberately not available here.
    kwargs = Data.kwargs + (
        # valid for CLI, other backends (e.g. webUI) can ignore this
        ('stdin_if_missing', bool, False),
        # File content may legitimately begin/end with whitespace, so the
        # Str default of True is overridden.
        ('noextrawhitespace', bool, False),
    )
|
|
|
|
|
2014-01-09 04:14:56 -06:00
|
|
|
class DateTime(Param):
    """
    DateTime parameter type.

    Accepts LDAP Generalized time in the following format:
        '%Y%m%d%H%M%SZ'

    Accepts a subset of values defined by ISO 8601:
        '%Y-%m-%dT%H:%M:%SZ'
        '%Y-%m-%dT%H:%MZ'
        '%Y-%m-%dZ'

    Also accepts the above formats using ' ' (space) as a separator instead
    of 'T'.

    Refer to `man strftime` for the explanations of %Y, %m, %d, %H, %M, %S.
    """

    # Recognized input formats, tried in order during conversion.
    accepted_formats = [LDAP_GENERALIZED_TIME_FORMAT,  # generalized time
                        '%Y-%m-%dT%H:%M:%SZ',  # ISO 8601, second precision
                        '%Y-%m-%dT%H:%MZ',  # ISO 8601, minute precision
                        '%Y-%m-%dZ',  # ISO 8601, date only
                        '%Y-%m-%d %H:%M:%SZ',  # non-ISO 8601, second precision
                        '%Y-%m-%d %H:%MZ']  # non-ISO 8601, minute precision

    type = datetime.datetime
    type_error = _('must be datetime value')

    def _convert_scalar(self, value, index=None):
        # Strings are parsed with the first accepted format that fits;
        # non-string values fall through to the generic conversion.
        if isinstance(value, six.string_types):
            for date_format in self.accepted_formats:
                try:
                    time = datetime.datetime.strptime(value, date_format)
                    return time
                except ValueError:
                    pass

            # If we get here, the strptime call did not succeed for any
            # the accepted formats, therefore raise error

            error = (_("does not match any of accepted formats: ") +
                      (', '.join(self.accepted_formats)))

            raise ConversionError(name=self.get_param_name(),
                                  error=error)

        return super(DateTime, self)._convert_scalar(value)
|
2014-01-09 04:14:56 -06:00
|
|
|
|
2009-11-05 09:15:47 -06:00
|
|
|
|
2009-11-18 10:33:55 -06:00
|
|
|
class AccessTime(Str):
    """
    Access time parameter type.

    Accepts values conforming to generalizedTime as defined in RFC 4517
    section 3.3.13 without time zone information.

    A value is either ``absolute`` (two generalized times separated by
    ``~``) or ``periodic`` (a yearly/monthly/weekly/daily specifier
    followed by an HHMM interval).  Validation is driven by
    `_rule_required`; each ``_check_*`` helper raises ValueError with a
    human-readable reason on the first problem found.
    """

    def _check_HHMM(self, t):
        # An HHMM timestamp: exactly four digits, HH in 0-23, MM in 0-59.
        if len(t) != 4:
            raise ValueError('HHMM must be exactly 4 characters long')
        if not t.isnumeric():
            raise ValueError('HHMM non-numeric')
        hh = int(t[0:2])
        if hh < 0 or hh > 23:
            raise ValueError('HH out of range')
        mm = int(t[2:4])
        if mm < 0 or mm > 59:
            raise ValueError('MM out of range')

    def _check_dotw(self, t):
        # Day of the week: either 1-7 or an English three-letter name.
        if t.isnumeric():
            value = int(t)
            if value < 1 or value > 7:
                raise ValueError('day of the week out of range')
        elif t not in ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'):
            raise ValueError('invalid day of the week')

    def _check_dotm(self, t, month_num=1, year=4):
        # Day of the month, checked against the length of the given month;
        # February respects the Gregorian leap-year rule.
        if not t.isnumeric():
            raise ValueError('day of the month non-numeric')
        value = int(t)
        if month_num in (1, 3, 5, 7, 8, 10, 12):
            if value < 1 or value > 31:
                raise ValueError('day of the month out of range')
        elif month_num in (4, 6, 9, 11):
            if value < 1 or value > 30:
                raise ValueError('day of the month out of range')
        elif month_num == 2:
            if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
                if value < 1 or value > 29:
                    raise ValueError('day of the month out of range')
            else:
                if value < 1 or value > 28:
                    raise ValueError('day of the month out of range')

    def _check_wotm(self, t):
        # Week of the month: 1-6.
        if not t.isnumeric():
            raise ValueError('week of the month non-numeric')
        value = int(t)
        if value < 1 or value > 6:
            raise ValueError('week of the month out of range')

    def _check_woty(self, t):
        # Week of the year: 1-52.
        if not t.isnumeric():
            raise ValueError('week of the year non-numeric')
        value = int(t)
        if value < 1 or value > 52:
            raise ValueError('week of the year out of range')

    def _check_doty(self, t):
        # Day of the year: 1-365.
        # NOTE(review): day 366 of a leap year is rejected -- confirm
        # whether that is intentional.
        if not t.isnumeric():
            raise ValueError('day of the year non-numeric')
        value = int(t)
        if value < 1 or value > 365:
            raise ValueError('day of the year out of range')

    def _check_month_num(self, t):
        # Month number: 1-12.
        if not t.isnumeric():
            raise ValueError('month number non-numeric')
        value = int(t)
        if value < 1 or value > 12:
            raise ValueError('month number out of range')

    def _check_interval(self, t, check_func):
        # A comma-separated list of values or "lo-hi" ranges; every element
        # is validated by check_func and range bounds must be ordered.
        intervals = t.split(',')
        for i in intervals:
            if not i:
                raise ValueError('invalid time range')
            values = i.split('-')
            if len(values) > 2:
                raise ValueError('invalid time range')
            for v in values:
                check_func(v)
            if len(values) == 2:
                if int(values[0]) > int(values[1]):
                    raise ValueError('invalid time range')

    def _check_W_spec(self, ts, index):
        # Week specifier: 'day' followed by a day-of-the-week interval.
        # Returns the index of the last consumed token.
        if ts[index] != 'day':
            raise ValueError('invalid week specifier')
        index += 1
        self._check_interval(ts[index], self._check_dotw)
        return index

    def _check_M_spec(self, ts, index):
        # Month specifier: either 'week <wotm> day <dotw>' or 'day <dotm>'.
        # Returns the index of the last consumed token.
        if ts[index] == 'week':
            self._check_interval(ts[index + 1], self._check_wotm)
            index = self._check_W_spec(ts, index + 2)
        elif ts[index] == 'day':
            index += 1
            self._check_interval(ts[index], self._check_dotm)
        else:
            raise ValueError('invalid month specifier')
        return index

    def _check_Y_spec(self, ts, index):
        # Year specifier: 'month <num> <M-spec>', 'week <woty> <W-spec>'
        # or 'day <doty>'.  Returns the index of the last consumed token.
        if ts[index] == 'month':
            index += 1
            self._check_interval(ts[index], self._check_month_num)
            # NOTE(review): month_num appears unused, but int() on an
            # interval such as '1-3' raises ValueError here, which narrows
            # 'month' specs to a single month -- confirm before removing.
            month_num = int(ts[index])
            index = self._check_M_spec(ts, index + 1)
        elif ts[index] == 'week':
            self._check_interval(ts[index + 1], self._check_woty)
            index = self._check_W_spec(ts, index + 2)
        elif ts[index] == 'day':
            index += 1
            self._check_interval(ts[index], self._check_doty)
        else:
            raise ValueError('invalid year specifier')
        return index

    def _check_generalized(self, t):
        # Validate a generalizedTime string: YYYYMMDDHH[MM[SS]].
        assert type(t) is unicode
        if len(t) not in (10, 12, 14):
            raise ValueError('incomplete generalized time')
        if not t.isnumeric():
            raise ValueError('time non-numeric')
        # don't check year value, with time travel and all :)
        self._check_month_num(t[4:6])
        year_num = int(t[0:4])
        month_num = int(t[4:6])
        self._check_dotm(t[6:8], month_num, year_num)
        if len(t) >= 12:
            self._check_HHMM(t[8:12])
        else:
            self._check_HHMM('%s00' % t[8:10])
        if len(t) == 14:
            s = int(t[12:14])
            # 60 is allowed to accommodate a leap second.
            if s < 0 or s > 60:
                raise ValueError('seconds out of range')

    def _check(self, time):
        # Validate a whole access-time value; raises ValueError on the
        # first problem found.
        ts = time.split()
        if ts[0] == 'absolute':
            if len(ts) != 4:
                raise ValueError('invalid format, must be \'absolute generalizedTime ~ generalizedTime\'')
            self._check_generalized(ts[1])
            if ts[2] != '~':
                raise ValueError('invalid time range separator')
            self._check_generalized(ts[3])
            if int(ts[1]) >= int(ts[3]):
                raise ValueError('invalid time range')
        elif ts[0] == 'periodic':
            index = None
            if ts[1] == 'yearly':
                index = self._check_Y_spec(ts, 2)
            elif ts[1] == 'monthly':
                index = self._check_M_spec(ts, 2)
            elif ts[1] == 'weekly':
                index = self._check_W_spec(ts, 2)
            elif ts[1] == 'daily':
                index = 1
            if index is None:
                # BUG FIX: the message previously read "yearly, monthy or
                # daily" -- a typo, and it omitted the supported 'weekly'.
                raise ValueError('period must be yearly, monthly, weekly or daily, got \'%s\'' % ts[1])
            self._check_interval(ts[index + 1], self._check_HHMM)
        else:
            raise ValueError('time neither absolute or periodic')

    def _rule_required(self, _, value):
        # Full validation happens in this rule hook; ValueError carries the
        # reason, IndexError means the value ran out of tokens early.
        try:
            self._check(value)
        except ValueError as e:
            raise ValidationError(name=self.get_param_name(), error=e.args[0])
        except IndexError:
            raise ValidationError(
                name=self.get_param_name(), error=ugettext('incomplete time value')
            )
        return None
|
2009-09-30 09:24:25 -05:00
|
|
|
|
|
|
|
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
- DN equalty comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 06:36:35 -05:00
|
|
|
class DNParam(Param):
|
|
|
|
type = DN
|
|
|
|
|
|
|
|
def _convert_scalar(self, value, index=None):
|
|
|
|
"""
|
|
|
|
Convert a single scalar value.
|
|
|
|
"""
|
2013-09-30 11:45:37 -05:00
|
|
|
if type(value) in self.allowed_types:
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
dn's. This is implmented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
attriutes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
  - DN equality comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 06:36:35 -05:00
|
|
|
return value
|
|
|
|
|
|
|
|
try:
|
|
|
|
dn = DN(value)
|
2015-07-30 09:49:29 -05:00
|
|
|
except Exception as e:
|
2016-05-23 06:20:27 -05:00
|
|
|
raise ConversionError(name=self.get_param_name(),
|
Use DN objects instead of strings
* Convert every string specifying a DN into a DN object
* Every place a dn was manipulated in some fashion it was replaced by
the use of DN operators
* Add new DNParam parameter type for parameters which are DN's
* DN objects are used 100% of the time throughout the entire data
pipeline whenever something is logically a dn.
* Many classes now enforce DN usage for their attributes which are
    dn's. This is implemented via ipautil.dn_attribute_property(). The
only permitted types for a class attribute specified to be a DN are
either None or a DN object.
* Require that every place a dn is used it must be a DN object.
This translates into lot of::
assert isinstance(dn, DN)
sprinkled through out the code. Maintaining these asserts is
valuable to preserve DN type enforcement. The asserts can be
disabled in production.
The goal of 100% DN usage 100% of the time has been realized, these
asserts are meant to preserve that.
The asserts also proved valuable in detecting functions which did
not obey their function signatures, such as the baseldap pre and
post callbacks.
* Moved ipalib.dn to ipapython.dn because DN class is shared with all
components, not just the server which uses ipalib.
* All API's now accept DN's natively, no need to convert to str (or
unicode).
* Removed ipalib.encoder and encode/decode decorators. Type conversion
is now explicitly performed in each IPASimpleLDAPObject method which
emulates a ldap.SimpleLDAPObject method.
* Entity & Entry classes now utilize DN's
* Removed __getattr__ in Entity & Entity clases. There were two
problems with it. It presented synthetic Python object attributes
based on the current LDAP data it contained. There is no way to
validate synthetic attributes using code checkers, you can't search
the code to find LDAP attribute accesses (because synthetic
    attributes look like Python attributes instead of LDAP data) and
error handling is circumscribed. Secondly __getattr__ was hiding
Python internal methods which broke class semantics.
* Replace use of methods inherited from ldap.SimpleLDAPObject via
IPAdmin class with IPAdmin methods. Directly using inherited methods
was causing us to bypass IPA logic. Mostly this meant replacing the
use of search_s() with getEntry() or getList(). Similarly direct
access of the LDAP data in classes using IPAdmin were replaced with
calls to getValue() or getValues().
* Objects returned by ldap2.find_entries() are now compatible with
either the python-ldap access methodology or the Entity/Entry access
methodology.
* All ldap operations now funnel through the common
IPASimpleLDAPObject giving us a single location where we interface
to python-ldap and perform conversions.
* The above 4 modifications means we've greatly reduced the
proliferation of multiple inconsistent ways to perform LDAP
operations. We are well on the way to having a single API in IPA for
doing LDAP (a long range goal).
* All certificate subject bases are now DN's
* DN objects were enhanced thusly:
- find, rfind, index, rindex, replace and insert methods were added
- AVA, RDN and DN classes were refactored in immutable and mutable
variants, the mutable variants are EditableAVA, EditableRDN and
EditableDN. By default we use the immutable variants preserving
important semantics. To edit a DN cast it to an EditableDN and
cast it back to DN when done editing. These issues are fully
described in other documentation.
- first_key_match was removed
  - DN equality comparison permits comparison to a basestring
* Fixed ldapupdate to work with DN's. This work included:
- Enhance test_updates.py to do more checking after applying
update. Add test for update_from_dict(). Convert code to use
unittest classes.
- Consolidated duplicate code.
- Moved code which should have been in the class into the class.
- Fix the handling of the 'deleteentry' update action. It's no longer
necessary to supply fake attributes to make it work. Detect case
where subsequent update applies a change to entry previously marked
for deletetion. General clean-up and simplification of the
'deleteentry' logic.
- Rewrote a couple of functions to be clearer and more Pythonic.
- Added documentation on the data structure being used.
- Simplfy the use of update_from_dict()
* Removed all usage of get_schema() which was being called prior to
accessing the .schema attribute of an object. If a class is using
internal lazy loading as an optimization it's not right to require
users of the interface to be aware of internal
optimization's. schema is now a property and when the schema
property is accessed it calls a private internal method to perform
the lazy loading.
* Added SchemaCache class to cache the schema's from individual
servers. This was done because of the observation we talk to
different LDAP servers, each of which may have it's own
schema. Previously we globally cached the schema from the first
server we connected to and returned that schema in all contexts. The
cache includes controls to invalidate it thus forcing a schema
refresh.
* Schema caching is now senstive to the run time context. During
install and upgrade the schema can change leading to errors due to
out-of-date cached schema. The schema cache is refreshed in these
contexts.
* We are aware of the LDAP syntax of all LDAP attributes. Every
attribute returned from an LDAP operation is passed through a
central table look-up based on it's LDAP syntax. The table key is
the LDAP syntax it's value is a Python callable that returns a
Python object matching the LDAP syntax. There are a handful of LDAP
attributes whose syntax is historically incorrect
(e.g. DistguishedNames that are defined as DirectoryStrings). The
table driven conversion mechanism is augmented with a table of
hard coded exceptions.
Currently only the following conversions occur via the table:
- dn's are converted to DN objects
- binary objects are converted to Python str objects (IPA
convention).
- everything else is converted to unicode using UTF-8 decoding (IPA
convention).
However, now that the table driven conversion mechanism is in place
it would be trivial to do things such as converting attributes
which have LDAP integer syntax into a Python integer, etc.
* Expected values in the unit tests which are a DN no longer need to
use lambda expressions to promote the returned value to a DN for
equality comparison. The return value is automatically promoted to
a DN. The lambda expressions have been removed making the code much
simpler and easier to read.
* Add class level logging to a number of classes which did not support
logging, less need for use of root_logger.
* Remove ipaserver/conn.py, it was unused.
* Consolidated duplicate code wherever it was found.
* Fixed many places that used string concatenation to form a new
string rather than string formatting operators. This is necessary
because string formatting converts it's arguments to a string prior
to building the result string. You can't concatenate a string and a
non-string.
* Simplify logic in rename_managed plugin. Use DN operators to edit
dn's.
* The live version of ipa-ldap-updater did not generate a log file.
The offline version did, now both do.
https://fedorahosted.org/freeipa/ticket/1670
https://fedorahosted.org/freeipa/ticket/1671
https://fedorahosted.org/freeipa/ticket/1672
https://fedorahosted.org/freeipa/ticket/1673
https://fedorahosted.org/freeipa/ticket/1674
https://fedorahosted.org/freeipa/ticket/1392
https://fedorahosted.org/freeipa/ticket/2872
2012-05-13 06:36:35 -05:00
|
|
|
error=ugettext(e))
|
|
|
|
return dn
|
|
|
|
|
2013-04-12 10:38:09 -05:00
|
|
|
|
|
|
|
class DeprecatedParam(Any):
    """
    A parameter kept only for backward compatibility.

    The ``no_option`` flag is always forced on, so the parameter is not
    exposed as a command-line option, and `_rule_deprecate` rejects every
    supplied value with a "this option is deprecated" error.
    """

    kwargs = Param.kwargs + (
        ('deprecate', bool, True),
    )

    def __init__(self, name, *rules, **kw):
        # Always hide the parameter from the CLI, preserving any flags
        # the caller already requested.
        kw['flags'] = list(kw.get('flags', ())) + ['no_option']
        super(DeprecatedParam, self).__init__(name, *rules, **kw)

    def _rule_deprecate(self, _, value):
        # Unconditionally report an error: the option no longer works.
        return _('this option is deprecated')
2009-01-13 01:27:06 -06:00
|
|
|
def create_param(spec):
    """
    Create an `Str` instance from the shorthand ``spec``.

    This function allows you to create `Str` parameters (the most common)
    from a convenient shorthand that defines the parameter name, whether it
    is required, and whether it is multivalue.  (For the definition of the
    shorthand syntax, see the `parse_param_spec()` function.)

    If ``spec`` is an ``str`` instance, it will be used to create a new
    `Str` parameter, which will be returned.  For example:

    >>> s = create_param('hometown?')
    >>> s
    Str('hometown?')
    >>> (s.name, s.required, s.multivalue)
    ('hometown', False, False)

    On the other hand, if ``spec`` is already a `Param` instance, it is
    returned unchanged.  For example:

    >>> b = Bytes('cert')
    >>> create_param(b) is b
    True

    As a plugin author, you will not call this function directly (which
    would be no more convenient than simply creating the `Str` instance).
    Instead, `frontend.Command` will call it for you when it evaluates the
    ``takes_args`` and ``takes_options`` attributes, and `frontend.Object`
    will call it for you when it evaluates the ``takes_params`` attribute.

    :param spec: A spec string or a `Param` instance.
    :raises TypeError: if ``spec`` is neither an ``str`` nor a `Param`.
    """
    if isinstance(spec, Param):
        return spec
    # Exact type check (not isinstance): only plain str spec strings are
    # accepted as shorthand.
    if type(spec) is str:
        return Str(spec)
    raise TypeError(
        TYPE_ERROR % ('spec', (str, Param), spec, type(spec))
    )
2014-03-27 08:36:39 -05:00
|
|
|
|
|
|
|
|
|
|
|
class DNSNameParam(Param):
    """
    Domain name parameter type.

    Values are converted to and validated as `DNSName` objects.  Unicode
    input is first checked with ``validate_idna_domain()`` before being
    wrapped in a `DNSName`.

    :only_absolute a domain name has to be absolute
                   (makes it absolute from unicode input)
    :only_relative a domain name has to be relative
    """
    type = DNSName
    type_error = _('must be DNS name')
    kwargs = Param.kwargs + (
        ('only_absolute', bool, False),
        ('only_relative', bool, False),
    )

    def __init__(self, name, *rules, **kw):
        super(DNSNameParam, self).__init__(name, *rules, **kw)
        # The two restrictions contradict each other; refuse the combination
        # up front rather than failing every value at validation time.
        if self.only_absolute and self.only_relative:
            raise ValueError('%s: cannot be both absolute and relative' %
                             self.nice)

    def _convert_scalar(self, value, index=None):
        """
        Convert a single value to a `DNSName`.

        Unicode input is validated as an IDNA domain name first; a
        validation failure is reported as a `ConversionError`.  When
        ``only_absolute`` is set, a relative name parsed from unicode input
        is silently made absolute instead of being rejected.
        """
        if isinstance(value, unicode):
            # Fix: removed dead assignment ``error = None`` — the local was
            # never read anywhere in this method.
            try:
                validate_idna_domain(value)
            except ValueError as e:
                raise ConversionError(name=self.get_param_name(),
                                      error=unicode(e))
            value = DNSName(value)

            # Promote relative unicode input to an absolute name when
            # requested, so _rule_only_absolute will not reject it.
            if self.only_absolute and not value.is_absolute():
                value = value.make_absolute()

        return super(DNSNameParam, self)._convert_scalar(value)

    def _rule_only_absolute(self, _, value):
        # Validation rule: reject relative names when only_absolute is set.
        if self.only_absolute and not value.is_absolute():
            return _('must be absolute')

    def _rule_only_relative(self, _, value):
        # Validation rule: reject absolute names when only_relative is set.
        if self.only_relative and value.is_absolute():
            return _('must be relative')