From 49c15e93a21014f6640bfd0e0c6752bdc61d105c Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 20:22:18 +0100 Subject: [PATCH 01/29] Add pgen2 and custom utilities. --- sphinx/pycode/Grammar.txt | 155 ++++++++++++ sphinx/pycode/__init__.py | 142 +++++++++++ sphinx/pycode/pgen2/__init__.py | 4 + sphinx/pycode/pgen2/driver.py | 145 ++++++++++++ sphinx/pycode/pgen2/grammar.py | 171 ++++++++++++++ sphinx/pycode/pgen2/literals.py | 60 +++++ sphinx/pycode/pgen2/parse.py | 201 ++++++++++++++++ sphinx/pycode/pgen2/pgen.py | 384 ++++++++++++++++++++++++++++++ sphinx/pycode/pgen2/token.py | 82 +++++++ sphinx/pycode/pgen2/tokenize.py | 405 ++++++++++++++++++++++++++++++++ sphinx/pycode/pytree.py | 295 +++++++++++++++++++++++ 11 files changed, 2044 insertions(+) create mode 100644 sphinx/pycode/Grammar.txt create mode 100644 sphinx/pycode/__init__.py create mode 100644 sphinx/pycode/pgen2/__init__.py create mode 100644 sphinx/pycode/pgen2/driver.py create mode 100644 sphinx/pycode/pgen2/grammar.py create mode 100644 sphinx/pycode/pgen2/literals.py create mode 100644 sphinx/pycode/pgen2/parse.py create mode 100644 sphinx/pycode/pgen2/pgen.py create mode 100755 sphinx/pycode/pgen2/token.py create mode 100644 sphinx/pycode/pgen2/tokenize.py create mode 100644 sphinx/pycode/pytree.py diff --git a/sphinx/pycode/Grammar.txt b/sphinx/pycode/Grammar.txt new file mode 100644 index 000000000..1f4a50ffb --- /dev/null +++ b/sphinx/pycode/Grammar.txt @@ -0,0 +1,155 @@ +# Grammar for Python + +# Note: Changing the grammar specified in this file will most likely +# require corresponding changes in the parser module +# (../Modules/parsermodule.c). If you can't make the changes to +# that module yourself, please co-ordinate the required changes +# with someone who can; ask around on python-dev for help. Fred +# Drake will probably be listening there. 
+ +# NOTE WELL: You should also follow all the steps listed in PEP 306, +# "How to Change Python's Grammar" + +# Commands for Kees Blom's railroad program +#diagram:token NAME +#diagram:token NUMBER +#diagram:token STRING +#diagram:token NEWLINE +#diagram:token ENDMARKER +#diagram:token INDENT +#diagram:output\input python.bla +#diagram:token DEDENT +#diagram:output\textwidth 20.04cm\oddsidemargin 0.0cm\evensidemargin 0.0cm +#diagram:rules + +# Start symbols for the grammar: +# file_input is a module or sequence of commands read from an input file; +# single_input is a single interactive statement; +# eval_input is the input for the eval() and input() functions. +# NB: compound_stmt in single_input is followed by extra NEWLINE! +file_input: (NEWLINE | stmt)* ENDMARKER +single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE +eval_input: testlist NEWLINE* ENDMARKER + +decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef) +funcdef: 'def' NAME parameters ['->' test] ':' suite +parameters: '(' [typedargslist] ')' +typedargslist: ((tfpdef ['=' test] ',')* + ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname) + | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) +tname: NAME [':' test] +tfpdef: tname | '(' tfplist ')' +tfplist: tfpdef (',' tfpdef)* [','] +varargslist: ((vfpdef ['=' test] ',')* + ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname) + | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) +vname: NAME +vfpdef: vname | '(' vfplist ')' +vfplist: vfpdef (',' vfpdef)* [','] + +stmt: simple_stmt | compound_stmt +simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE +small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | + import_stmt | global_stmt | exec_stmt | assert_stmt) +expr_stmt: testlist (augassign (yield_expr|testlist) | + ('=' (yield_expr|testlist))*) +augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | + '<<=' | 
'>>=' | '**=' | '//=') +# For normal assignments, additional restrictions enforced by the interpreter +print_stmt: 'print' ( [ test (',' test)* [','] ] | + '>>' test [ (',' test)+ [','] ] ) +del_stmt: 'del' exprlist +pass_stmt: 'pass' +flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: 'break' +continue_stmt: 'continue' +return_stmt: 'return' [testlist] +yield_stmt: yield_expr +raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] +import_stmt: import_name | import_from +import_name: 'import' dotted_as_names +import_from: ('from' ('.'* dotted_name | '.'+) + 'import' ('*' | '(' import_as_names ')' | import_as_names)) +import_as_name: NAME ['as' NAME] +dotted_as_name: dotted_name ['as' NAME] +import_as_names: import_as_name (',' import_as_name)* [','] +dotted_as_names: dotted_as_name (',' dotted_as_name)* +dotted_name: NAME ('.' NAME)* +global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* +exec_stmt: 'exec' expr ['in' test [',' test]] +assert_stmt: 'assert' test [',' test] + +compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated +if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] +while_stmt: 'while' test ':' suite ['else' ':' suite] +for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] +try_stmt: ('try' ':' suite + ((except_clause ':' suite)+ + ['else' ':' suite] + ['finally' ':' suite] | + 'finally' ':' suite)) +with_stmt: 'with' test [ with_var ] ':' suite +with_var: 'as' expr +# NB compile.c makes sure that the default except clause is last +except_clause: 'except' [test [(',' | 'as') test]] +suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT + +# Backward compatibility cruft to support: +# [ x for x in lambda: True, lambda: False if x() ] +# even while also allowing: +# lambda x: 5 if x else 2 +# (But not a mix of the two) +testlist_safe: old_test [(',' old_test)+ [',']] +old_test: or_test | old_lambdef +old_lambdef: 'lambda' 
[varargslist] ':' old_test + +test: or_test ['if' or_test 'else' test] | lambdef +or_test: and_test ('or' and_test)* +and_test: not_test ('and' not_test)* +not_test: 'not' not_test | comparison +comparison: expr (comp_op expr)* +comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' +expr: xor_expr ('|' xor_expr)* +xor_expr: and_expr ('^' and_expr)* +and_expr: shift_expr ('&' shift_expr)* +shift_expr: arith_expr (('<<'|'>>') arith_expr)* +arith_expr: term (('+'|'-') term)* +term: factor (('*'|'/'|'%'|'//') factor)* +factor: ('+'|'-'|'~') factor | power +power: atom trailer* ['**' factor] +atom: ('(' [yield_expr|testlist_gexp] ')' | + '[' [listmaker] ']' | + '{' [dictsetmaker] '}' | + '`' testlist1 '`' | + NAME | NUMBER | STRING+ | '.' '.' '.') +listmaker: test ( comp_for | (',' test)* [','] ) +testlist_gexp: test ( comp_for | (',' test)* [','] ) +lambdef: 'lambda' [varargslist] ':' test +trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME +subscriptlist: subscript (',' subscript)* [','] +subscript: test | [test] ':' [test] [sliceop] +sliceop: ':' [test] +exprlist: expr (',' expr)* [','] +testlist: test (',' test)* [','] +dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | + (test (comp_for | (',' test)* [','])) ) + +classdef: 'class' NAME ['(' [arglist] ')'] ':' suite + +arglist: (argument ',')* (argument [','] + |'*' test (',' argument)* [',' '**' test] + |'**' test) +argument: test [comp_for] | test '=' test # Really [keyword '='] test + +comp_iter: comp_for | comp_if +comp_for: 'for' exprlist 'in' testlist_safe [comp_iter] +comp_if: 'if' old_test [comp_iter] + +testlist1: test (',' test)* + +# not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: NAME + +yield_expr: 'yield' [testlist] diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py new file mode 100644 index 000000000..b5d83e679 --- /dev/null +++ b/sphinx/pycode/__init__.py @@ -0,0 +1,142 @@ +# -*- 
coding: utf-8 -*- +""" + sphinx.pycode + ~~~~~~~~~~~~~ + + Utilities parsing and analyzing Python code. + + :copyright: 2008 by Georg Brandl. + :license: BSD, see LICENSE for details. +""" + +import sys +from os import path + +from sphinx.pycode import pytree +from sphinx.pycode.pgen2 import driver, token + + +# load the Python grammar +_grammarfile = path.join(path.dirname(__file__), 'Grammar.txt') +pygrammar = driver.load_grammar(_grammarfile) +pydriver = driver.Driver(pygrammar, convert=pytree.convert) + +class sym: pass +for k, v in pygrammar.symbol2number.iteritems(): + setattr(sym, k, v) + +# a dict mapping terminal and nonterminal numbers to their names +number2name = pygrammar.number2symbol.copy() +number2name.update(token.tok_name) + + +def prepare_commentdoc(s): + result = [] + lines = [line.strip() for line in s.expandtabs().splitlines()] + for line in lines: + if line.startswith('#: '): + result.append(line[3:]) + if result and result[-1]: + result.append('') + return '\n'.join(result) + + +_eq = pytree.Leaf(token.EQUAL, '=') + + +class ClassAttrVisitor(pytree.NodeVisitor): + def init(self): + self.namespace = [] + + def visit_classdef(self, node): + self.namespace.append(node[1].value) + self.generic_visit(node) + self.namespace.pop() + + def visit_expr_stmt(self, node): + if _eq in node.children: + prefix = node[0].get_prefix() + if not prefix: + prev = node[0].get_prev_leaf() + if prev and prev.type == token.INDENT: + prefix = prev.prefix + doc = prepare_commentdoc(prefix) + if doc: + targ = '.'.join(self.namespace + [node[0].compact()]) + print targ + print doc + + def visit_funcdef(self, node): + return + + +class ModuleAnalyzer(object): + + def __init__(self, tree, modname, srcname): + self.tree = tree + self.modname = modname + self.srcname = srcname + + @classmethod + def for_string(cls, string, modname, srcname=''): + return cls(pydriver.parse_string(string), modname, srcname) + + @classmethod + def for_file(cls, filename, modname): + # XXX if 
raises + fileobj = open(filename, 'r') + try: + return cls(pydriver.parse_stream(fileobj), modname, filename) + finally: + fileobj.close() + + @classmethod + def for_module(cls, modname): + if modname not in sys.modules: + # XXX + __import__(modname) + mod = sys.modules[modname] + if hasattr(mod, '__loader__'): + # XXX raises + source = mod.__loader__.get_source(modname) + return cls.for_string(source, modname) + filename = getattr(mod, '__file__', None) + if filename is None: + # XXX + raise RuntimeError('no source found') + if filename.lower().endswith('.pyo') or \ + filename.lower().endswith('.pyc'): + filename = filename[:-1] + elif not filename.lower().endswith('.py'): + raise RuntimeError('not a .py file') + if not path.isfile(filename): + # XXX + raise RuntimeError('source not present') + return cls.for_file(filename, modname) + + def find_defs(self): + attr_visitor = ClassAttrVisitor(number2name) + attr_visitor.namespace = [self.modname] + attr_visitor.visit(self.tree) + +class Test: + """doc""" + + #: testing... + x = 1 + """doc""" + + #: testing more... + x = 2 + + +#ma = ModuleAnalyzer.for_file(__file__.rstrip('c')) +import time +x0=time.time() +ma = ModuleAnalyzer.for_module('sphinx.builders.latex') +x1=time.time() +ma.find_defs() +x2=time.time() +print "%.4f %.4f" % (x1-x0, x2-x1) + +#print pytree.nice_repr(ma.tree, number2name, True) diff --git a/sphinx/pycode/pgen2/__init__.py b/sphinx/pycode/pgen2/__init__.py new file mode 100644 index 000000000..af3904845 --- /dev/null +++ b/sphinx/pycode/pgen2/__init__.py @@ -0,0 +1,4 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""The pgen2 package.""" diff --git a/sphinx/pycode/pgen2/driver.py b/sphinx/pycode/pgen2/driver.py new file mode 100644 index 000000000..3e9e10435 --- /dev/null +++ b/sphinx/pycode/pgen2/driver.py @@ -0,0 +1,145 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. 
+# Licensed to PSF under a Contributor Agreement. + +# Modifications: +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Parser driver. + +This provides a high-level interface to parse a file into a syntax tree. + +""" + +__author__ = "Guido van Rossum " + +__all__ = ["Driver", "load_grammar"] + +# Python imports +import os +import logging +import sys + +# Pgen imports +from sphinx.pycode.pgen2 import grammar, parse, token, tokenize, pgen + + +class Driver(object): + + def __init__(self, grammar, convert=None, logger=None): + self.grammar = grammar + if logger is None: + logger = logging.getLogger() + self.logger = logger + self.convert = convert + + def parse_tokens(self, tokens, debug=False): + """Parse a series of tokens and return the syntax tree.""" + # XXX Move the prefix computation into a wrapper around tokenize. + p = parse.Parser(self.grammar, self.convert) + p.setup() + lineno = 1 + column = 0 + type = value = start = end = line_text = None + prefix = "" + for quintuple in tokens: + type, value, start, end, line_text = quintuple + if start != (lineno, column): + assert (lineno, column) <= start, ((lineno, column), start) + s_lineno, s_column = start + if lineno < s_lineno: + prefix += "\n" * (s_lineno - lineno) + lineno = s_lineno + column = 0 + if column < s_column: + prefix += line_text[column:s_column] + column = s_column + if type in (tokenize.COMMENT, tokenize.NL): + prefix += value + lineno, column = end + if value.endswith("\n"): + lineno += 1 + column = 0 + continue + if type == token.OP: + type = grammar.opmap[value] + if debug: + self.logger.debug("%s %r (prefix=%r)", + token.tok_name[type], value, prefix) + if p.addtoken(type, value, (prefix, start)): + if debug: + self.logger.debug("Stop.") + break + prefix = "" + lineno, column = end + if value.endswith("\n"): + lineno += 1 + column = 0 + else: + # We never broke out -- EOF is too soon (how can this happen???) 
+ raise parse.ParseError("incomplete input", t, v, x) + return p.rootnode + + def parse_stream_raw(self, stream, debug=False): + """Parse a stream and return the syntax tree.""" + tokens = tokenize.generate_tokens(stream.readline) + return self.parse_tokens(tokens, debug) + + def parse_stream(self, stream, debug=False): + """Parse a stream and return the syntax tree.""" + return self.parse_stream_raw(stream, debug) + + def parse_file(self, filename, debug=False): + """Parse a file and return the syntax tree.""" + stream = open(filename) + try: + return self.parse_stream(stream, debug) + finally: + stream.close() + + def parse_string(self, text, debug=False): + """Parse a string and return the syntax tree.""" + tokens = tokenize.generate_tokens(generate_lines(text).next) + return self.parse_tokens(tokens, debug) + + +def generate_lines(text): + """Generator that behaves like readline without using StringIO.""" + for line in text.splitlines(True): + yield line + while True: + yield "" + + +def load_grammar(gt="Grammar.txt", gp=None, + save=True, force=False, logger=None): + """Load the grammar (maybe from a pickle).""" + if logger is None: + logger = logging.getLogger() + if gp is None: + head, tail = os.path.splitext(gt) + if tail == ".txt": + tail = "" + gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle" + if force or not _newer(gp, gt): + logger.info("Generating grammar tables from %s", gt) + g = pgen.generate_grammar(gt) + if save: + logger.info("Writing grammar tables to %s", gp) + try: + g.dump(gp) + except IOError, e: + logger.info("Writing failed:"+str(e)) + else: + g = grammar.Grammar() + g.load(gp) + return g + + +def _newer(a, b): + """Inquire whether file a was written since file b.""" + if not os.path.exists(a): + return False + if not os.path.exists(b): + return True + return os.path.getmtime(a) >= os.path.getmtime(b) diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py new file mode 100644 index 
000000000..381d80e82 --- /dev/null +++ b/sphinx/pycode/pgen2/grammar.py @@ -0,0 +1,171 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""This module defines the data structures used to represent a grammar. + +These are a bit arcane because they are derived from the data +structures used by Python's 'pgen' parser generator. + +There's also a table here mapping operators to their names in the +token module; the Python tokenize module reports all operators as the +fallback token code OP, but the parser needs the actual token code. + +""" + +# Python imports +import pickle + +# Local imports +from sphinx.pycode.pgen2 import token, tokenize + + +class Grammar(object): + """Pgen parsing tables tables conversion class. + + Once initialized, this class supplies the grammar tables for the + parsing engine implemented by parse.py. The parsing engine + accesses the instance variables directly. The class here does not + provide initialization of the tables; several subclasses exist to + do this (see the conv and pgen modules). + + The load() method reads the tables from a pickle file, which is + much faster than the other ways offered by subclasses. The pickle + file is written by calling dump() (after loading the grammar + tables using a subclass). The report() method prints a readable + representation of the tables to stdout, for debugging. + + The instance variables are as follows: + + symbol2number -- a dict mapping symbol names to numbers. Symbol + numbers are always 256 or higher, to distinguish + them from token numbers, which are between 0 and + 255 (inclusive). + + number2symbol -- a dict mapping numbers to symbol names; + these two are each other's inverse. + + states -- a list of DFAs, where each DFA is a list of + states, each state is is a list of arcs, and each + arc is a (i, j) pair where i is a label and j is + a state number. The DFA number is the index into + this list. 
(This name is slightly confusing.) + Final states are represented by a special arc of + the form (0, j) where j is its own state number. + + dfas -- a dict mapping symbol numbers to (DFA, first) + pairs, where DFA is an item from the states list + above, and first is a set of tokens that can + begin this grammar rule (represented by a dict + whose values are always 1). + + labels -- a list of (x, y) pairs where x is either a token + number or a symbol number, and y is either None + or a string; the strings are keywords. The label + number is the index in this list; label numbers + are used to mark state transitions (arcs) in the + DFAs. + + start -- the number of the grammar's start symbol. + + keywords -- a dict mapping keyword strings to arc labels. + + tokens -- a dict mapping token numbers to arc labels. + + """ + + def __init__(self): + self.symbol2number = {} + self.number2symbol = {} + self.states = [] + self.dfas = {} + self.labels = [(0, "EMPTY")] + self.keywords = {} + self.tokens = {} + self.symbol2label = {} + self.start = 256 + + def dump(self, filename): + """Dump the grammar tables to a pickle file.""" + f = open(filename, "wb") + pickle.dump(self.__dict__, f, 2) + f.close() + + def load(self, filename): + """Load the grammar tables from a pickle file.""" + f = open(filename, "rb") + d = pickle.load(f) + f.close() + self.__dict__.update(d) + + def report(self): + """Dump the grammar tables to standard output, for debugging.""" + from pprint import pprint + print "s2n" + pprint(self.symbol2number) + print "n2s" + pprint(self.number2symbol) + print "states" + pprint(self.states) + print "dfas" + pprint(self.dfas) + print "labels" + pprint(self.labels) + print "start", self.start + + +# Map from operator to number (since tokenize doesn't do this) + +opmap_raw = """ +( LPAR +) RPAR +[ LSQB +] RSQB +: COLON +, COMMA +; SEMI ++ PLUS +- MINUS +* STAR +/ SLASH +| VBAR +& AMPER +< LESS +> GREATER += EQUAL +. 
DOT +% PERCENT +` BACKQUOTE +{ LBRACE +} RBRACE +@ AT +== EQEQUAL +!= NOTEQUAL +<> NOTEQUAL +<= LESSEQUAL +>= GREATEREQUAL +~ TILDE +^ CIRCUMFLEX +<< LEFTSHIFT +>> RIGHTSHIFT +** DOUBLESTAR ++= PLUSEQUAL +-= MINEQUAL +*= STAREQUAL +/= SLASHEQUAL +%= PERCENTEQUAL +&= AMPEREQUAL +|= VBAREQUAL +^= CIRCUMFLEXEQUAL +<<= LEFTSHIFTEQUAL +>>= RIGHTSHIFTEQUAL +**= DOUBLESTAREQUAL +// DOUBLESLASH +//= DOUBLESLASHEQUAL +-> RARROW +""" + +opmap = {} +for line in opmap_raw.splitlines(): + if line: + op, name = line.split() + opmap[op] = getattr(token, name) diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py new file mode 100644 index 000000000..0b3948a54 --- /dev/null +++ b/sphinx/pycode/pgen2/literals.py @@ -0,0 +1,60 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Safely evaluate Python string literals without using eval().""" + +import re + +simple_escapes = {"a": "\a", + "b": "\b", + "f": "\f", + "n": "\n", + "r": "\r", + "t": "\t", + "v": "\v", + "'": "'", + '"': '"', + "\\": "\\"} + +def escape(m): + all, tail = m.group(0, 1) + assert all.startswith("\\") + esc = simple_escapes.get(tail) + if esc is not None: + return esc + if tail.startswith("x"): + hexes = tail[1:] + if len(hexes) < 2: + raise ValueError("invalid hex string escape ('\\%s')" % tail) + try: + i = int(hexes, 16) + except ValueError: + raise ValueError("invalid hex string escape ('\\%s')" % tail) + else: + try: + i = int(tail, 8) + except ValueError: + raise ValueError("invalid octal string escape ('\\%s')" % tail) + return chr(i) + +def evalString(s): + assert s.startswith("'") or s.startswith('"'), repr(s[:1]) + q = s[0] + if s[:3] == q*3: + q = q*3 + assert s.endswith(q), repr(s[-len(q):]) + assert len(s) >= 2*len(q) + s = s[len(q):-len(q)] + return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) + +def test(): + for i in range(256): + c = chr(i) + s = repr(c) + e = evalString(s) + 
if e != c: + print i, c, s, e + + +if __name__ == "__main__": + test() diff --git a/sphinx/pycode/pgen2/parse.py b/sphinx/pycode/pgen2/parse.py new file mode 100644 index 000000000..60eec05ea --- /dev/null +++ b/sphinx/pycode/pgen2/parse.py @@ -0,0 +1,201 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Parser engine for the grammar tables generated by pgen. + +The grammar table must be loaded first. + +See Parser/parser.c in the Python distribution for additional info on +how this parsing engine works. + +""" + +# Local imports +from sphinx.pycode.pgen2 import token + +class ParseError(Exception): + """Exception to signal the parser is stuck.""" + + def __init__(self, msg, type, value, context): + Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + (msg, type, value, context)) + self.msg = msg + self.type = type + self.value = value + self.context = context + +class Parser(object): + """Parser engine. + + The proper usage sequence is: + + p = Parser(grammar, [converter]) # create instance + p.setup([start]) # prepare for parsing + : + if p.addtoken(...): # parse a token; may raise ParseError + break + root = p.rootnode # root of abstract syntax tree + + A Parser instance may be reused by calling setup() repeatedly. + + A Parser instance contains state pertaining to the current token + sequence, and should not be used concurrently by different threads + to parse separate token sequences. + + See driver.py for how to get input tokens by tokenizing a file or + string. + + Parsing is complete when addtoken() returns True; the root of the + abstract syntax tree can then be retrieved from the rootnode + instance variable. When a syntax error occurs, addtoken() raises + the ParseError exception. There is no error recovery; the parser + cannot be used after a syntax error was reported (but it can be + reinitialized by calling setup()). 
+ + """ + + def __init__(self, grammar, convert=None): + """Constructor. + + The grammar argument is a grammar.Grammar instance; see the + grammar module for more information. + + The parser is not ready yet for parsing; you must call the + setup() method to get it started. + + The optional convert argument is a function mapping concrete + syntax tree nodes to abstract syntax tree nodes. If not + given, no conversion is done and the syntax tree produced is + the concrete syntax tree. If given, it must be a function of + two arguments, the first being the grammar (a grammar.Grammar + instance), and the second being the concrete syntax tree node + to be converted. The syntax tree is converted from the bottom + up. + + A concrete syntax tree node is a (type, value, context, nodes) + tuple, where type is the node type (a token or symbol number), + value is None for symbols and a string for tokens, context is + None or an opaque value used for error reporting (typically a + (lineno, offset) pair), and nodes is a list of children for + symbols, and None for tokens. + + An abstract syntax tree node may be anything; this is entirely + up to the converter function. + + """ + self.grammar = grammar + self.convert = convert or (lambda grammar, node: node) + + def setup(self, start=None): + """Prepare for parsing. + + This *must* be called before starting to parse. + + The optional argument is an alternative start symbol; it + defaults to the grammar's start symbol. + + You can use a Parser instance to parse any number of programs; + each time you call setup() the parser is reset to an initial + state determined by the (implicit or explicit) start symbol. + + """ + if start is None: + start = self.grammar.start + # Each stack entry is a tuple: (dfa, state, node). + # A node is a tuple: (type, value, context, children), + # where children is a list of nodes or None, and context may be None. 
+ newnode = (start, None, None, []) + stackentry = (self.grammar.dfas[start], 0, newnode) + self.stack = [stackentry] + self.rootnode = None + self.used_names = set() # Aliased to self.rootnode.used_names in pop() + + def addtoken(self, type, value, context): + """Add a token; return True iff this is the end of the program.""" + # Map from token to label + ilabel = self.classify(type, value, context) + # Loop until the token is shifted; may raise exceptions + while True: + dfa, state, node = self.stack[-1] + states, first = dfa + arcs = states[state] + # Look for a state with this label + for i, newstate in arcs: + t, v = self.grammar.labels[i] + if ilabel == i: + # Look it up in the list of labels + assert t < 256 + # Shift a token; we're done with it + self.shift(type, value, newstate, context) + # Pop while we are in an accept-only state + state = newstate + while states[state] == [(0, state)]: + self.pop() + if not self.stack: + # Done parsing! + return True + dfa, state, node = self.stack[-1] + states, first = dfa + # Done with this token + return False + elif t >= 256: + # See if it's a symbol and if we're in its first set + itsdfa = self.grammar.dfas[t] + itsstates, itsfirst = itsdfa + if ilabel in itsfirst: + # Push a symbol + self.push(t, self.grammar.dfas[t], newstate, context) + break # To continue the outer while loop + else: + if (0, state) in arcs: + # An accepting state, pop it and try something else + self.pop() + if not self.stack: + # Done parsing, but another token is input + raise ParseError("too much input", + type, value, context) + else: + # No success finding a transition + raise ParseError("bad input", type, value, context) + + def classify(self, type, value, context): + """Turn a token into a label. 
(Internal)""" + if type == token.NAME: + # Keep a listing of all used names + self.used_names.add(value) + # Check for reserved words + ilabel = self.grammar.keywords.get(value) + if ilabel is not None: + return ilabel + ilabel = self.grammar.tokens.get(type) + if ilabel is None: + raise ParseError("bad token", type, value, context) + return ilabel + + def shift(self, type, value, newstate, context): + """Shift a token. (Internal)""" + dfa, state, node = self.stack[-1] + newnode = (type, value, context, None) + newnode = self.convert(self.grammar, newnode) + if newnode is not None: + node[-1].append(newnode) + self.stack[-1] = (dfa, newstate, node) + + def push(self, type, newdfa, newstate, context): + """Push a nonterminal. (Internal)""" + dfa, state, node = self.stack[-1] + newnode = (type, None, context, []) + self.stack[-1] = (dfa, newstate, node) + self.stack.append((newdfa, 0, newnode)) + + def pop(self): + """Pop a nonterminal. (Internal)""" + popdfa, popstate, popnode = self.stack.pop() + newnode = self.convert(self.grammar, popnode) + if newnode is not None: + if self.stack: + dfa, state, node = self.stack[-1] + node[-1].append(newnode) + else: + self.rootnode = newnode + self.rootnode.used_names = self.used_names diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py new file mode 100644 index 000000000..d6895eaef --- /dev/null +++ b/sphinx/pycode/pgen2/pgen.py @@ -0,0 +1,384 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. 
+ +# Pgen imports +from sphinx.pycode.pgen2 import grammar, token, tokenize + +class PgenGrammar(grammar.Grammar): + pass + +class ParserGenerator(object): + + def __init__(self, filename, stream=None): + close_stream = None + if stream is None: + stream = open(filename) + close_stream = stream.close + self.filename = filename + self.stream = stream + self.generator = tokenize.generate_tokens(stream.readline) + self.gettoken() # Initialize lookahead + self.dfas, self.startsymbol = self.parse() + if close_stream is not None: + close_stream() + self.first = {} # map from symbol name to set of tokens + self.addfirstsets() + + def make_grammar(self): + c = PgenGrammar() + names = self.dfas.keys() + names.sort() + names.remove(self.startsymbol) + names.insert(0, self.startsymbol) + for name in names: + i = 256 + len(c.symbol2number) + c.symbol2number[name] = i + c.number2symbol[i] = name + for name in names: + dfa = self.dfas[name] + states = [] + for state in dfa: + arcs = [] + for label, next in state.arcs.iteritems(): + arcs.append((self.make_label(c, label), dfa.index(next))) + if state.isfinal: + arcs.append((0, dfa.index(state))) + states.append(arcs) + c.states.append(states) + c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) + c.start = c.symbol2number[self.startsymbol] + return c + + def make_first(self, c, name): + rawfirst = self.first[name] + first = {} + for label in rawfirst: + ilabel = self.make_label(c, label) + ##assert ilabel not in first # XXX failed on <> ... != + first[ilabel] = 1 + return first + + def make_label(self, c, label): + # XXX Maybe this should be a method on a subclass of converter? 
+ ilabel = len(c.labels) + if label[0].isalpha(): + # Either a symbol name or a named token + if label in c.symbol2number: + # A symbol name (a non-terminal) + if label in c.symbol2label: + return c.symbol2label[label] + else: + c.labels.append((c.symbol2number[label], None)) + c.symbol2label[label] = ilabel + return ilabel + else: + # A named token (NAME, NUMBER, STRING) + itoken = getattr(token, label, None) + assert isinstance(itoken, int), label + assert itoken in token.tok_name, label + if itoken in c.tokens: + return c.tokens[itoken] + else: + c.labels.append((itoken, None)) + c.tokens[itoken] = ilabel + return ilabel + else: + # Either a keyword or an operator + assert label[0] in ('"', "'"), label + value = eval(label) + if value[0].isalpha(): + # A keyword + if value in c.keywords: + return c.keywords[value] + else: + c.labels.append((token.NAME, value)) + c.keywords[value] = ilabel + return ilabel + else: + # An operator (any non-numeric token) + itoken = grammar.opmap[value] # Fails if unknown token + if itoken in c.tokens: + return c.tokens[itoken] + else: + c.labels.append((itoken, None)) + c.tokens[itoken] = ilabel + return ilabel + + def addfirstsets(self): + names = self.dfas.keys() + names.sort() + for name in names: + if name not in self.first: + self.calcfirst(name) + #print name, self.first[name].keys() + + def calcfirst(self, name): + dfa = self.dfas[name] + self.first[name] = None # dummy to detect left recursion + state = dfa[0] + totalset = {} + overlapcheck = {} + for label, next in state.arcs.iteritems(): + if label in self.dfas: + if label in self.first: + fset = self.first[label] + if fset is None: + raise ValueError("recursion for rule %r" % name) + else: + self.calcfirst(label) + fset = self.first[label] + totalset.update(fset) + overlapcheck[label] = fset + else: + totalset[label] = 1 + overlapcheck[label] = {label: 1} + inverse = {} + for label, itsfirst in overlapcheck.iteritems(): + for symbol in itsfirst: + if symbol in inverse: + 
raise ValueError("rule %s is ambiguous; %s is in the" + " first sets of %s as well as %s" % + (name, symbol, label, inverse[symbol])) + inverse[symbol] = label + self.first[name] = totalset + + def parse(self): + dfas = {} + startsymbol = None + # MSTART: (NEWLINE | RULE)* ENDMARKER + while self.type != token.ENDMARKER: + while self.type == token.NEWLINE: + self.gettoken() + # RULE: NAME ':' RHS NEWLINE + name = self.expect(token.NAME) + self.expect(token.OP, ":") + a, z = self.parse_rhs() + self.expect(token.NEWLINE) + #self.dump_nfa(name, a, z) + dfa = self.make_dfa(a, z) + #self.dump_dfa(name, dfa) + oldlen = len(dfa) + self.simplify_dfa(dfa) + newlen = len(dfa) + dfas[name] = dfa + #print name, oldlen, newlen + if startsymbol is None: + startsymbol = name + return dfas, startsymbol + + def make_dfa(self, start, finish): + # To turn an NFA into a DFA, we define the states of the DFA + # to correspond to *sets* of states of the NFA. Then do some + # state reduction. Let's represent sets as dicts with 1 for + # values. 
+ assert isinstance(start, NFAState) + assert isinstance(finish, NFAState) + def closure(state): + base = {} + addclosure(state, base) + return base + def addclosure(state, base): + assert isinstance(state, NFAState) + if state in base: + return + base[state] = 1 + for label, next in state.arcs: + if label is None: + addclosure(next, base) + states = [DFAState(closure(start), finish)] + for state in states: # NB states grows while we're iterating + arcs = {} + for nfastate in state.nfaset: + for label, next in nfastate.arcs: + if label is not None: + addclosure(next, arcs.setdefault(label, {})) + for label, nfaset in arcs.iteritems(): + for st in states: + if st.nfaset == nfaset: + break + else: + st = DFAState(nfaset, finish) + states.append(st) + state.addarc(st, label) + return states # List of DFAState instances; first one is start + + def dump_nfa(self, name, start, finish): + print "Dump of NFA for", name + todo = [start] + for i, state in enumerate(todo): + print " State", i, state is finish and "(final)" or "" + for label, next in state.arcs: + if next in todo: + j = todo.index(next) + else: + j = len(todo) + todo.append(next) + if label is None: + print " -> %d" % j + else: + print " %s -> %d" % (label, j) + + def dump_dfa(self, name, dfa): + print "Dump of DFA for", name + for i, state in enumerate(dfa): + print " State", i, state.isfinal and "(final)" or "" + for label, next in state.arcs.iteritems(): + print " %s -> %d" % (label, dfa.index(next)) + + def simplify_dfa(self, dfa): + # This is not theoretically optimal, but works well enough. + # Algorithm: repeatedly look for two states that have the same + # set of arcs (same labels pointing to the same nodes) and + # unify them, until things stop changing. 
+ + # dfa is a list of DFAState instances + changes = True + while changes: + changes = False + for i, state_i in enumerate(dfa): + for j in range(i+1, len(dfa)): + state_j = dfa[j] + if state_i == state_j: + #print " unify", i, j + del dfa[j] + for state in dfa: + state.unifystate(state_j, state_i) + changes = True + break + + def parse_rhs(self): + # RHS: ALT ('|' ALT)* + a, z = self.parse_alt() + if self.value != "|": + return a, z + else: + aa = NFAState() + zz = NFAState() + aa.addarc(a) + z.addarc(zz) + while self.value == "|": + self.gettoken() + a, z = self.parse_alt() + aa.addarc(a) + z.addarc(zz) + return aa, zz + + def parse_alt(self): + # ALT: ITEM+ + a, b = self.parse_item() + while (self.value in ("(", "[") or + self.type in (token.NAME, token.STRING)): + c, d = self.parse_item() + b.addarc(c) + b = d + return a, b + + def parse_item(self): + # ITEM: '[' RHS ']' | ATOM ['+' | '*'] + if self.value == "[": + self.gettoken() + a, z = self.parse_rhs() + self.expect(token.OP, "]") + a.addarc(z) + return a, z + else: + a, z = self.parse_atom() + value = self.value + if value not in ("+", "*"): + return a, z + self.gettoken() + z.addarc(a) + if value == "+": + return a, z + else: + return a, a + + def parse_atom(self): + # ATOM: '(' RHS ')' | NAME | STRING + if self.value == "(": + self.gettoken() + a, z = self.parse_rhs() + self.expect(token.OP, ")") + return a, z + elif self.type in (token.NAME, token.STRING): + a = NFAState() + z = NFAState() + a.addarc(z, self.value) + self.gettoken() + return a, z + else: + self.raise_error("expected (...) 
or NAME or STRING, got %s/%s", + self.type, self.value) + + def expect(self, type, value=None): + if self.type != type or (value is not None and self.value != value): + self.raise_error("expected %s/%s, got %s/%s", + type, value, self.type, self.value) + value = self.value + self.gettoken() + return value + + def gettoken(self): + tup = self.generator.next() + while tup[0] in (tokenize.COMMENT, tokenize.NL): + tup = self.generator.next() + self.type, self.value, self.begin, self.end, self.line = tup + #print token.tok_name[self.type], repr(self.value) + + def raise_error(self, msg, *args): + if args: + try: + msg = msg % args + except: + msg = " ".join([msg] + map(str, args)) + raise SyntaxError(msg, (self.filename, self.end[0], + self.end[1], self.line)) + +class NFAState(object): + + def __init__(self): + self.arcs = [] # list of (label, NFAState) pairs + + def addarc(self, next, label=None): + assert label is None or isinstance(label, str) + assert isinstance(next, NFAState) + self.arcs.append((label, next)) + +class DFAState(object): + + def __init__(self, nfaset, final): + assert isinstance(nfaset, dict) + assert isinstance(iter(nfaset).next(), NFAState) + assert isinstance(final, NFAState) + self.nfaset = nfaset + self.isfinal = final in nfaset + self.arcs = {} # map from label to DFAState + + def addarc(self, next, label): + assert isinstance(label, str) + assert label not in self.arcs + assert isinstance(next, DFAState) + self.arcs[label] = next + + def unifystate(self, old, new): + for label, next in self.arcs.iteritems(): + if next is old: + self.arcs[label] = new + + def __eq__(self, other): + # Equality test -- ignore the nfaset instance variable + assert isinstance(other, DFAState) + if self.isfinal != other.isfinal: + return False + # Can't just return self.arcs == other.arcs, because that + # would invoke this method recursively, with cycles... 
+ if len(self.arcs) != len(other.arcs): + return False + for label, next in self.arcs.iteritems(): + if next is not other.arcs.get(label): + return False + return True + +def generate_grammar(filename="Grammar.txt"): + p = ParserGenerator(filename) + return p.make_grammar() diff --git a/sphinx/pycode/pgen2/token.py b/sphinx/pycode/pgen2/token.py new file mode 100755 index 000000000..61468b313 --- /dev/null +++ b/sphinx/pycode/pgen2/token.py @@ -0,0 +1,82 @@ +#! /usr/bin/env python + +"""Token constants (from "token.h").""" + +# Taken from Python (r53757) and modified to include some tokens +# originally monkeypatched in by pgen2.tokenize + +#--start constants-- +ENDMARKER = 0 +NAME = 1 +NUMBER = 2 +STRING = 3 +NEWLINE = 4 +INDENT = 5 +DEDENT = 6 +LPAR = 7 +RPAR = 8 +LSQB = 9 +RSQB = 10 +COLON = 11 +COMMA = 12 +SEMI = 13 +PLUS = 14 +MINUS = 15 +STAR = 16 +SLASH = 17 +VBAR = 18 +AMPER = 19 +LESS = 20 +GREATER = 21 +EQUAL = 22 +DOT = 23 +PERCENT = 24 +BACKQUOTE = 25 +LBRACE = 26 +RBRACE = 27 +EQEQUAL = 28 +NOTEQUAL = 29 +LESSEQUAL = 30 +GREATEREQUAL = 31 +TILDE = 32 +CIRCUMFLEX = 33 +LEFTSHIFT = 34 +RIGHTSHIFT = 35 +DOUBLESTAR = 36 +PLUSEQUAL = 37 +MINEQUAL = 38 +STAREQUAL = 39 +SLASHEQUAL = 40 +PERCENTEQUAL = 41 +AMPEREQUAL = 42 +VBAREQUAL = 43 +CIRCUMFLEXEQUAL = 44 +LEFTSHIFTEQUAL = 45 +RIGHTSHIFTEQUAL = 46 +DOUBLESTAREQUAL = 47 +DOUBLESLASH = 48 +DOUBLESLASHEQUAL = 49 +AT = 50 +OP = 51 +COMMENT = 52 +NL = 53 +RARROW = 54 +ERRORTOKEN = 55 +N_TOKENS = 56 +NT_OFFSET = 256 +#--end constants-- + +tok_name = {} +for _name, _value in globals().items(): + if type(_value) is type(0): + tok_name[_value] = _name + + +def ISTERMINAL(x): + return x < NT_OFFSET + +def ISNONTERMINAL(x): + return x >= NT_OFFSET + +def ISEOF(x): + return x == ENDMARKER diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py new file mode 100644 index 000000000..46ee7842b --- /dev/null +++ b/sphinx/pycode/pgen2/tokenize.py @@ -0,0 +1,405 @@ +# Copyright (c) 2001, 2002, 2003, 
2004, 2005, 2006 Python Software Foundation. +# All rights reserved. + +"""Tokenization help for Python programs. + +generate_tokens(readline) is a generator that breaks a stream of +text into Python tokens. It accepts a readline-like method which is called +repeatedly to get the next line of input (or "" for EOF). It generates +5-tuples with these members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators + +Older entry points + tokenize_loop(readline, tokeneater) + tokenize(readline, tokeneater=printtoken) +are the same, except instead of generating tokens, tokeneater is a callback +function to which the 5 fields described above are passed as 5 arguments, +each time a new token is found.""" + +__author__ = 'Ka-Ping Yee ' +__credits__ = \ + 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro' + +import string, re +from sphinx.pycode.pgen2.token import * +from sphinx.pycode.pgen2 import token + +__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize", + "generate_tokens", "untokenize"] +del token + +def group(*choices): return '(' + '|'.join(choices) + ')' +def any(*choices): return group(*choices) + '*' +def maybe(*choices): return group(*choices) + '?' + +Whitespace = r'[ \f\t]*' +Comment = r'#[^\r\n]*' +Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) +Name = r'[a-zA-Z_]\w*' + +Binnumber = r'0[bB][01]*' +Hexnumber = r'0[xX][\da-fA-F]*[lL]?' +Octnumber = r'0[oO]?[0-7]*[lL]?' +Decnumber = r'[1-9]\d*[lL]?' 
+Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber) +Exponent = r'[eE][-+]?\d+' +Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent) +Expfloat = r'\d+' + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]') +Number = group(Imagnumber, Floatnumber, Intnumber) + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""') +# Single-line ' or " string. +String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", + r"//=?", r"->", + r"[+\-*/%&|^=<>]=?", + r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'[:;.,`@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. 
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +tokenprog, pseudoprog, single3prog, double3prog = map( + re.compile, (Token, PseudoToken, Single3, Double3)) +endprogs = {"'": re.compile(Single), '"': re.compile(Double), + "'''": single3prog, '"""': double3prog, + "r'''": single3prog, 'r"""': double3prog, + "u'''": single3prog, 'u"""': double3prog, + "b'''": single3prog, 'b"""': double3prog, + "ur'''": single3prog, 'ur"""': double3prog, + "br'''": single3prog, 'br"""': double3prog, + "R'''": single3prog, 'R"""': double3prog, + "U'''": single3prog, 'U"""': double3prog, + "B'''": single3prog, 'B"""': double3prog, + "uR'''": single3prog, 'uR"""': double3prog, + "Ur'''": single3prog, 'Ur"""': double3prog, + "UR'''": single3prog, 'UR"""': double3prog, + "bR'''": single3prog, 'bR"""': double3prog, + "Br'''": single3prog, 'Br"""': double3prog, + "BR'''": single3prog, 'BR"""': double3prog, + 'r': None, 'R': None, + 'u': None, 'U': None, + 'b': None, 'B': None} + +triple_quoted = {} +for t in ("'''", '"""', + "r'''", 'r"""', "R'''", 'R"""', + "u'''", 'u"""', "U'''", 'U"""', + "b'''", 'b"""', "B'''", 'B"""', + "ur'''", 'ur"""', "Ur'''", 'Ur"""', + "uR'''", 'uR"""', "UR'''", 'UR"""', + "br'''", 'br"""', "Br'''", 'Br"""', + "bR'''", 'bR"""', "BR'''", 'BR"""',): + triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', + "r'", 'r"', "R'", 'R"', + "u'", 'u"', "U'", 'U"', + "b'", 'b"', "B'", 'B"', + "ur'", 'ur"', "Ur'", 'Ur"', + "uR'", 'uR"', "UR'", 'UR"', + "br'", 'br"', "Br'", 'Br"', + "bR'", 'bR"', "BR'", 'BR"', ): + single_quoted[t] = t + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + +def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing + print 
"%d,%d-%d,%d:\t%s\t%s" % \ + (srow, scol, erow, ecol, tok_name[type], repr(token)) + +def tokenize(readline, tokeneater=printtoken): + """ + The tokenize() function accepts two parameters: one representing the + input stream, and one providing an output mechanism for tokenize(). + + The first parameter, readline, must be a callable object which provides + the same interface as the readline() method of built-in file objects. + Each call to the function should return one line of input as a string. + + The second parameter, tokeneater, must also be a callable object. It is + called once for each token, with five arguments, corresponding to the + tuples generated by generate_tokens(). + """ + try: + tokenize_loop(readline, tokeneater) + except StopTokenizing: + pass + +# backwards compatible interface +def tokenize_loop(readline, tokeneater): + for token_info in generate_tokens(readline): + tokeneater(*token_info) + +class Untokenizer: + + def __init__(self): + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + + def add_whitespace(self, start): + row, col = start + assert row <= self.prev_row + col_offset = col - self.prev_col + if col_offset: + self.tokens.append(" " * col_offset) + + def untokenize(self, iterable): + for t in iterable: + if len(t) == 2: + self.compat(t, iterable) + break + tok_type, token, start, end, line = t + self.add_whitespace(start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + return "".join(self.tokens) + + def compat(self, token, iterable): + startline = False + indents = [] + toks_append = self.tokens.append + toknum, tokval = token + if toknum in (NAME, NUMBER): + tokval += ' ' + if toknum in (NEWLINE, NL): + startline = True + for tok in iterable: + toknum, tokval = tok[:2] + + if toknum in (NAME, NUMBER): + tokval += ' ' + + if toknum == INDENT: + indents.append(tokval) + continue + elif toknum == DEDENT: + indents.pop() + continue + 
elif toknum in (NEWLINE, NL): + startline = True + elif startline and indents: + toks_append(indents[-1]) + startline = False + toks_append(tokval) + +def untokenize(iterable): + """Transform tokens back into Python source code. + + Each element returned by the iterable must be a token sequence + with at least two elements, a token number and token value. If + only two tokens are passed, the resulting output is poor. + + Round-trip invariant for full input: + Untokenized source will match input source exactly + + Round-trip invariant for limited intput: + # Output text will tokenize the back to the input + t1 = [tok[:2] for tok in generate_tokens(f.readline)] + newcode = untokenize(t1) + readline = iter(newcode.splitlines(1)).next + t2 = [tok[:2] for tokin generate_tokens(readline)] + assert t1 == t2 + """ + ut = Untokenizer() + return ut.untokenize(iterable) + +def generate_tokens(readline): + """ + The generate_tokens() generator requires one argment, readline, which + must be a callable object which provides the same interface as the + readline() method of built-in file objects. Each call to the function + should return one line of input as a string. Alternately, readline + can be a callable function terminating with StopIteration: + readline = open(myfile).next # Example of alternate readline + + The generator produces 5-tuples with these members: the token type; the + token string; a 2-tuple (srow, scol) of ints specifying the row and + column where the token begins in the source; a 2-tuple (erow, ecol) of + ints specifying the row and column where the token ends in the source; + and the line on which the token was found. The line passed is the + logical line; continuation lines are included. 
+ """ + lnum = parenlev = continued = 0 + namechars, numchars = string.ascii_letters + '_', '0123456789' + contstr, needcont = '', 0 + contline = None + indents = [0] + + while 1: # loop over lines in stream + try: + line = readline() + except StopIteration: + line = '' + lnum = lnum + 1 + pos, max = 0, len(line) + + if contstr: # continued string + if not line: + raise TokenError, ("EOF in multi-line string", strstart) + endmatch = endprog.match(line) + if endmatch: + pos = end = endmatch.end(0) + yield (STRING, contstr + line[:end], + strstart, (lnum, end), contline + line) + contstr, needcont = '', 0 + contline = None + elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': + yield (ERRORTOKEN, contstr + line, + strstart, (lnum, len(line)), contline) + contstr = '' + contline = None + continue + else: + contstr = contstr + line + contline = contline + line + continue + + elif parenlev == 0 and not continued: # new statement + if not line: break + column = 0 + while pos < max: # measure leading whitespace + if line[pos] == ' ': column = column + 1 + elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize + elif line[pos] == '\f': column = 0 + else: break + pos = pos + 1 + if pos == max: break + + if line[pos] in '#\r\n': # skip comments or blank lines + if line[pos] == '#': + comment_token = line[pos:].rstrip('\r\n') + nl_pos = pos + len(comment_token) + yield (COMMENT, comment_token, + (lnum, pos), (lnum, pos + len(comment_token)), line) + yield (NL, line[nl_pos:], + (lnum, nl_pos), (lnum, len(line)), line) + else: + yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], + (lnum, pos), (lnum, len(line)), line) + continue + + if column > indents[-1]: # count indents or dedents + indents.append(column) + yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) + while column < indents[-1]: + if column not in indents: + raise IndentationError( + "unindent does not match any outer indentation level", + ("", lnum, pos, line)) + indents = indents[:-1] + 
yield (DEDENT, '', (lnum, pos), (lnum, pos), line) + + else: # continued statement + if not line: + raise TokenError, ("EOF in multi-line statement", (lnum, 0)) + continued = 0 + + while pos < max: + pseudomatch = pseudoprog.match(line, pos) + if pseudomatch: # scan for tokens + start, end = pseudomatch.span(1) + spos, epos, pos = (lnum, start), (lnum, end), end + token, initial = line[start:end], line[start] + + if initial in numchars or \ + (initial == '.' and token != '.'): # ordinary number + yield (NUMBER, token, spos, epos, line) + elif initial in '\r\n': + newline = NEWLINE + if parenlev > 0: + newline = NL + yield (newline, token, spos, epos, line) + elif initial == '#': + assert not token.endswith("\n") + yield (COMMENT, token, spos, epos, line) + elif token in triple_quoted: + endprog = endprogs[token] + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + yield (STRING, token, spos, (lnum, pos), line) + else: + strstart = (lnum, start) # multiple lines + contstr = line[start:] + contline = line + break + elif initial in single_quoted or \ + token[:2] in single_quoted or \ + token[:3] in single_quoted: + if token[-1] == '\n': # continued string + strstart = (lnum, start) + endprog = (endprogs[initial] or endprogs[token[1]] or + endprogs[token[2]]) + contstr, needcont = line[start:], 1 + contline = line + break + else: # ordinary string + yield (STRING, token, spos, epos, line) + elif initial in namechars: # ordinary name + yield (NAME, token, spos, epos, line) + elif initial == '\\': # continued stmt + # This yield is new; needed for better idempotency: + yield (NL, token, spos, (lnum, pos), line) + continued = 1 + else: + if initial in '([{': parenlev = parenlev + 1 + elif initial in ')]}': parenlev = parenlev - 1 + yield (OP, token, spos, epos, line) + else: + yield (ERRORTOKEN, line[pos], + (lnum, pos), (lnum, pos+1), line) + pos = pos + 1 + + for indent in indents[1:]: # pop remaining 
indent levels + yield (DEDENT, '', (lnum, 0), (lnum, 0), '') + yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') + +if __name__ == '__main__': # testing + import sys + if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline) + else: tokenize(sys.stdin.readline) diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py new file mode 100644 index 000000000..950319c59 --- /dev/null +++ b/sphinx/pycode/pytree.py @@ -0,0 +1,295 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Python parse tree definitions. + +This is a very concrete parse tree; we need to keep every token and +even the comments and whitespace between tokens. + +Adapted for read-only nodes from pytree.py in Python's 2to3 tool, and +added a few bits. +""" + +__author__ = "Guido van Rossum " + + +class Base(object): + + """Abstract base class for Node and Leaf. + + This provides some default functionality and boilerplate using the + template pattern. + + A node may be a subnode of at most one parent. + """ + + # Default values for instance variables + type = None # int: token number (< 256) or symbol number (>= 256) + parent = None # Parent node pointer, or None + children = () # Tuple of subnodes + was_changed = False + + def __new__(cls, *args, **kwds): + """Constructor that prevents Base from being instantiated.""" + assert cls is not Base, "Cannot instantiate Base" + return object.__new__(cls) + + def __eq__(self, other): + """Compares two nodes for equality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return self._eq(other) + + def __ne__(self, other): + """Compares two nodes for inequality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return not self._eq(other) + + def _eq(self, other): + """Compares two nodes for equality. + + This is called by __eq__ and __ne__. 
It is only called if the + two nodes have the same type. This must be implemented by the + concrete subclass. Nodes should be considered equal if they + have the same structure, ignoring the prefix string and other + context information. + """ + raise NotImplementedError + + def get_lineno(self): + """Returns the line number which generated the invocant node.""" + node = self + while not isinstance(node, Leaf): + if not node.children: + return + node = node.children[0] + return node.lineno + + def get_next_sibling(self): + """Return the node immediately following the invocant in their + parent's children list. If the invocant does not have a next + sibling, return None.""" + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + try: + return self.parent.children[i+1] + except IndexError: + return None + + def get_prev_sibling(self): + """Return the node immediately preceding the invocant in their + parent's children list. If the invocant does not have a previous + sibling, return None.""" + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i-1] + + def get_prev_leaf(self): + """Return the leaf node that precedes this node in the parse tree.""" + def last_child(node): + if isinstance(node, Leaf): + return node + elif not node.children: + return None + else: + return last_child(node.children[-1]) + if self.parent is None: + return None + prev = self.get_prev_sibling() + if isinstance(prev, Leaf): + return prev + elif prev is not None: + return last_child(prev) + return self.parent.get_prev_leaf() + + def get_suffix(self): + """Return the string immediately following the invocant node. 
This + is effectively equivalent to node.get_next_sibling().get_prefix()""" + next_sib = self.get_next_sibling() + if next_sib is None: + return "" + return next_sib.get_prefix() + + +class Node(Base): + + """Concrete implementation for interior nodes.""" + + def __init__(self, type, children, context=None, prefix=None): + """Initializer. + + Takes a type constant (a symbol number >= 256), a sequence of + child nodes, and an optional context keyword argument. + + As a side effect, the parent pointers of the children are updated. + """ + assert type >= 256, type + self.type = type + self.children = list(children) + for ch in self.children: + assert ch.parent is None, repr(ch) + ch.parent = self + if prefix is not None: + self.set_prefix(prefix) + + def __repr__(self): + return "%s(%s, %r)" % (self.__class__.__name__, + self.type, self.children) + + def __str__(self): + """This reproduces the input source exactly.""" + return "".join(map(str, self.children)) + + def compact(self): + return ''.join(child.compact() for child in self.children) + + def __getitem__(self, index): + return self.children[index] + + def __iter__(self): + return iter(self.children) + + def _eq(self, other): + """Compares two nodes for equality.""" + return (self.type, self.children) == (other.type, other.children) + + def post_order(self): + """Returns a post-order iterator for the tree.""" + for child in self.children: + for node in child.post_order(): + yield node + yield self + + def pre_order(self): + """Returns a pre-order iterator for the tree.""" + yield self + for child in self.children: + for node in child.post_order(): + yield node + + def get_prefix(self): + """Returns the prefix for the node. + + This passes the call on to the first child. 
+ """ + if not self.children: + return "" + return self.children[0].get_prefix() + + +class Leaf(Base): + + """Concrete implementation for leaf nodes.""" + + # Default values for instance variables + prefix = "" # Whitespace and comments preceding this token in the input + lineno = 0 # Line where this token starts in the input + column = 0 # Column where this token tarts in the input + + def __init__(self, type, value, context=None, prefix=None): + """Initializer. + + Takes a type constant (a token number < 256), a string value, + and an optional context keyword argument. + """ + assert 0 <= type < 256, type + if context is not None: + self.prefix, (self.lineno, self.column) = context + self.type = type + self.value = value + if prefix is not None: + self.prefix = prefix + + def __repr__(self): + return "%s(%r, %r)" % (self.__class__.__name__, + self.type, self.value) + + def __str__(self): + """This reproduces the input source exactly.""" + return self.prefix + str(self.value) + + def compact(self): + return str(self.value) + + def _eq(self, other): + """Compares two nodes for equality.""" + return (self.type, self.value) == (other.type, other.value) + + def post_order(self): + """Returns a post-order iterator for the tree.""" + yield self + + def pre_order(self): + """Returns a pre-order iterator for the tree.""" + yield self + + def get_prefix(self): + """Returns the prefix for the node.""" + return self.prefix + + +def convert(grammar, raw_node): + """Convert raw node to a Node or Leaf instance.""" + type, value, context, children = raw_node + if children or type in grammar.number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. 
+ if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) + + +def nice_repr(node, number2name, prefix=False): + def _repr(node): + if isinstance(node, Leaf): + return "%s(%r)" % (number2name[node.type], node.value) + else: + return "%s(%s)" % (number2name[node.type], + ', '.join(map(_repr, node.children))) + def _prepr(node): + if isinstance(node, Leaf): + return "%s(%r, %r)" % (number2name[node.type], node.prefix, node.value) + else: + return "%s(%s)" % (number2name[node.type], + ', '.join(map(_prepr, node.children))) + return (prefix and _prepr or _repr)(node) + + +class NodeVisitor(object): + def __init__(self, number2name): + self.number2name = number2name + self.init() + + def init(self): + pass + + def visit(self, node): + """Visit a node.""" + method = 'visit_' + self.number2name[node.type] + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + if isinstance(node, Node): + for child in node: + self.visit(child) From bb2f9b67d847af6970d73e6b4036877abae8172b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 20:37:30 +0100 Subject: [PATCH 02/29] Cleanup; add scoping to ClassAttrVisitor. 
--- sphinx/pycode/__init__.py | 61 ++++++++++++++++++--------------------- sphinx/pycode/pytree.py | 6 ++-- 2 files changed, 31 insertions(+), 36 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index b5d83e679..e006ce160 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -10,6 +10,7 @@ """ import sys +import time from os import path from sphinx.pycode import pytree @@ -45,8 +46,10 @@ _eq = pytree.Leaf(token.EQUAL, '=') class ClassAttrVisitor(pytree.NodeVisitor): - def init(self): + def init(self, scope): + self.scope = scope self.namespace = [] + self.collected = [] def visit_classdef(self, node): self.namespace.append(node[1].value) @@ -54,17 +57,21 @@ class ClassAttrVisitor(pytree.NodeVisitor): self.namespace.pop() def visit_expr_stmt(self, node): - if _eq in node.children: - prefix = node[0].get_prefix() - if not prefix: - prev = node[0].get_prev_leaf() - if prev and prev.type == token.INDENT: - prefix = prev.prefix - doc = prepare_commentdoc(prefix) - if doc: - targ = '.'.join(self.namespace + [node[0].compact()]) - print targ - print doc + if _eq not in node.children: + # not an assignment (we don't care for augmented assignments) + return + prefix = node[0].get_prefix() + if not prefix: + # if this assignment is the first thing in a class block, + # the comment will be the prefix of the preceding INDENT token + prev = node[0].get_prev_leaf() + if prev and prev.type == token.INDENT: + prefix = prev.prefix + doc = prepare_commentdoc(prefix) + if doc: + name = '.'.join(self.namespace + [node[0].compact()]) + if name.startswith(self.scope): + self.collected.append((name, doc)) def visit_funcdef(self, node): return @@ -115,28 +122,16 @@ class ModuleAnalyzer(object): return cls.for_file(filename, modname) def find_defs(self): - attr_visitor = ClassAttrVisitor(number2name) - attr_visitor.namespace = [self.modname] + attr_visitor = ClassAttrVisitor(number2name, '') attr_visitor.visit(self.tree) - -class Test: - 
"""doc""" - - #: testing... - x = 1 - """doc""" - - #: testing more... - x = 2 + for name, doc in attr_visitor.collected: + print '>>', name + print doc -#ma = ModuleAnalyzer.for_file(__file__.rstrip('c')) -import time -x0=time.time() -ma = ModuleAnalyzer.for_module('sphinx.builders.latex') -x1=time.time() +x0 = time.time() +ma = ModuleAnalyzer.for_module('sphinx.builders.html') +x1 = time.time() ma.find_defs() -x2=time.time() -print "%.4f %.4f" % (x1-x0, x2-x1) - -#print pytree.nice_repr(ma.tree, number2name, True) +x2 = time.time() +print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py index 950319c59..a0e83b633 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/pytree.py @@ -275,11 +275,11 @@ def nice_repr(node, number2name, prefix=False): class NodeVisitor(object): - def __init__(self, number2name): + def __init__(self, number2name, *args): self.number2name = number2name - self.init() + self.init(*args) - def init(self): + def init(self, *args): pass def visit(self, node): From f4cff4371e5feca038e7b77db73367de7a80b97a Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 20:39:14 +0100 Subject: [PATCH 03/29] Add comment. --- sphinx/pycode/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index e006ce160..26d3579dd 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -74,6 +74,7 @@ class ClassAttrVisitor(pytree.NodeVisitor): self.collected.append((name, doc)) def visit_funcdef(self, node): + # don't descend into functions -- nothing interesting there return From 3d4963ae02e1722571f1e100b785b403bf0379b6 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 20:47:32 +0100 Subject: [PATCH 04/29] Improve error handling. 
--- sphinx/pycode/__init__.py | 51 +++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 26d3579dd..0ebf683c4 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -14,7 +14,7 @@ import time from os import path from sphinx.pycode import pytree -from sphinx.pycode.pgen2 import driver, token +from sphinx.pycode.pgen2 import driver, token, parse # load the Python grammar @@ -78,6 +78,14 @@ class ClassAttrVisitor(pytree.NodeVisitor): return +class PycodeError(Exception): + def __str__(self): + res = self.args[0] + if len(self.args) > 1: + res += ' (exception was: %r)' % self.args[1] + return res + + class ModuleAnalyzer(object): def __init__(self, tree, modname, srcname): @@ -91,48 +99,55 @@ class ModuleAnalyzer(object): @classmethod def for_file(cls, filename, modname): - # XXX if raises - fileobj = open(filename, 'r') try: - return cls(pydriver.parse_stream(fileobj), modname, filename) + fileobj = open(filename, 'r') + except Exception, err: + raise PycodeError('error opening %r' % filename, err) + try: + try: + return cls(pydriver.parse_stream(fileobj), modname, filename) + except parse.ParseError, err: + raise PycodeError('error parsing %r' % filename, err) finally: fileobj.close() @classmethod def for_module(cls, modname): if modname not in sys.modules: - # XXX - __import__(modname) + try: + __import__(modname) + except ImportError, err: + raise PycodeError('error importing %r' % modname, err) mod = sys.modules[modname] if hasattr(mod, '__loader__'): - # XXX raises - source = mod.__loader__.get_source(modname) + try: + source = mod.__loader__.get_source(modname) + except Exception, err: + raise PycodeError('error getting source for %r' % modname, err) return cls.for_string(source, modname) filename = getattr(mod, '__file__', None) if filename is None: - # XXX - raise RuntimeError('no source found') + raise PycodeError('no source found 
for module %r' % modname) if filename.lower().endswith('.pyo') or \ filename.lower().endswith('.pyc'): filename = filename[:-1] elif not filename.lower().endswith('.py'): - raise RuntimeError('not a .py file') + raise PycodeError('source is not a .py file: %r' % filename) if not path.isfile(filename): - # XXX - raise RuntimeError('source not present') + raise PycodeError('source file is not present: %r' % filename) return cls.for_file(filename, modname) - def find_defs(self): + def find_attrs(self): attr_visitor = ClassAttrVisitor(number2name, '') attr_visitor.visit(self.tree) - for name, doc in attr_visitor.collected: - print '>>', name - print doc + return attr_visitor.collected x0 = time.time() ma = ModuleAnalyzer.for_module('sphinx.builders.html') x1 = time.time() -ma.find_defs() +for name, doc in ma.find_attrs(): + print '>>', name + print doc x2 = time.time() print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) From 21caf917f24c2986644ff93c6f9802c39abe4d45 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 22:25:15 +0100 Subject: [PATCH 05/29] Fix handling of INDENT/DEDENT tokens. --- sphinx/pycode/__init__.py | 39 ++++++++++++++++++++++++++++----------- sphinx/pycode/pytree.py | 3 +++ 2 files changed, 31 insertions(+), 11 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 0ebf683c4..ab34af1cc 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -32,6 +32,10 @@ number2name.update(token.tok_name) def prepare_commentdoc(s): + """ + Extract documentation comment lines (starting with #:) and return them as a + list of lines. Returns an empty list if there is no documentation. + """ result = [] lines = [line.strip() for line in s.expandtabs().splitlines()] for line in lines: @@ -46,6 +50,10 @@ _eq = pytree.Leaf(token.EQUAL, '=') class ClassAttrVisitor(pytree.NodeVisitor): + """ + Visitor that collects comments appearing before attribute assignments + on toplevel and in classes. 
+ """ def init(self, scope): self.scope = scope self.namespace = [] @@ -60,18 +68,27 @@ class ClassAttrVisitor(pytree.NodeVisitor): if _eq not in node.children: # not an assignment (we don't care for augmented assignments) return - prefix = node[0].get_prefix() - if not prefix: - # if this assignment is the first thing in a class block, - # the comment will be the prefix of the preceding INDENT token - prev = node[0].get_prev_leaf() - if prev and prev.type == token.INDENT: - prefix = prev.prefix - doc = prepare_commentdoc(prefix) - if doc: - name = '.'.join(self.namespace + [node[0].compact()]) + pnode = node[0] + prefix = pnode.get_prefix() + # if the assignment is the first statement on a new indentation + # level, its preceding whitespace and comments are not assigned + # to that token, but the first INDENT or DEDENT token + while not prefix: + pnode = pnode.get_prev_leaf() + if not pnode or pnode.type not in (token.INDENT, token.DEDENT): + break + docstring = prepare_commentdoc(prefix) + if not docstring: + return + # add an item for each assignment target + for i in range(0, len(node) - 1, 2): + target = node[i] + if target.type != token.NAME: + # don't care about complex targets + continue + name = '.'.join(self.namespace + [target.value]) if name.startswith(self.scope): - self.collected.append((name, doc)) + self.collected.append((name, docstring)) def visit_funcdef(self, node): # don't descend into functions -- nothing interesting there diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py index a0e83b633..bd72bc998 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/pytree.py @@ -166,6 +166,9 @@ class Node(Base): def __iter__(self): return iter(self.children) + def __len__(self): + return len(self.children) + def _eq(self, other): """Compares two nodes for equality.""" return (self.type, self.children) == (other.type, other.children) From 428ed4a277b8198bddad961f3615dd2c2fa9e8f4 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 
2008 22:30:00 +0100 Subject: [PATCH 06/29] Another fix for DEDENT/INDENT handling. --- sphinx/builders/html.py | 3 ++- sphinx/pycode/__init__.py | 1 + sphinx/pycode/pytree.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py index 4ce30ad8b..2bcd54ece 100644 --- a/sphinx/builders/html.py +++ b/sphinx/builders/html.py @@ -37,8 +37,9 @@ except ImportError: except ImportError: json = None - +#: the filename for the inventory of objects INVENTORY_FILENAME = 'objects.inv' +#: the filename for the "last build" file (for serializing builders) LAST_BUILD_FILENAME = 'last_build' diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index ab34af1cc..dd32d4708 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -77,6 +77,7 @@ class ClassAttrVisitor(pytree.NodeVisitor): pnode = pnode.get_prev_leaf() if not pnode or pnode.type not in (token.INDENT, token.DEDENT): break + prefix = pnode.get_prefix() docstring = prepare_commentdoc(prefix) if not docstring: return diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py index bd72bc998..46017a959 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/pytree.py @@ -221,8 +221,8 @@ class Leaf(Base): self.prefix = prefix def __repr__(self): - return "%s(%r, %r)" % (self.__class__.__name__, - self.type, self.value) + return "%s(%r, %r, %r)" % (self.__class__.__name__, + self.type, self.value, self.prefix) def __str__(self): """This reproduces the input source exactly.""" From eb66f155448b436ccec918c79888779d9bf74bd1 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 29 Dec 2008 22:30:41 +0100 Subject: [PATCH 07/29] Ignore grammar pickle files. 
--- .hgignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.hgignore b/.hgignore index a6b6e37a9..375894228 100644 --- a/.hgignore +++ b/.hgignore @@ -2,6 +2,7 @@ .*\.egg build/ dist/ +sphinx/pycode/Grammar.*pickle Sphinx.egg-info/ doc/_build/ TAGS From eb7d654cb0bf251cfc5b261383a3de3a3a9dd65d Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 01:29:49 +0100 Subject: [PATCH 08/29] Some speedups in pytree. Add Cython parse.py replacement, yielding a 2x speedup in parsing. --- sphinx/pycode/pgen2/parse.pyx | 156 ++++++++++++++++++++++++++++++++++ sphinx/pycode/pytree.py | 23 ++--- 2 files changed, 165 insertions(+), 14 deletions(-) create mode 100644 sphinx/pycode/pgen2/parse.pyx diff --git a/sphinx/pycode/pgen2/parse.pyx b/sphinx/pycode/pgen2/parse.pyx new file mode 100644 index 000000000..6a11ee6b5 --- /dev/null +++ b/sphinx/pycode/pgen2/parse.pyx @@ -0,0 +1,156 @@ +# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Parser engine for the grammar tables generated by pgen. + +The grammar table must be loaded first. + +See Parser/parser.c in the Python distribution for additional info on +how this parsing engine works. 
+ +""" + +from sphinx.pycode.pytree import Node, Leaf + +DEF NAME = 1 + +class ParseError(Exception): + """Exception to signal the parser is stuck.""" + + def __init__(self, msg, type, value, context): + Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + (msg, type, value, context)) + self.msg = msg + self.type = type + self.value = value + self.context = context + + +cdef class Parser: + cdef public grammar, stack, rootnode, used_names + cdef _grammar_dfas, _grammar_labels, _grammar_keywords, _grammar_tokens + cdef _grammar_number2symbol + + def __init__(self, grammar, convert=None): + self.grammar = grammar + #self.convert = convert or noconvert + + self._grammar_dfas = grammar.dfas + self._grammar_labels = grammar.labels + self._grammar_keywords = grammar.keywords + self._grammar_tokens = grammar.tokens + self._grammar_number2symbol = grammar.number2symbol + + def setup(self, start=None): + if start is None: + start = self.grammar.start + # Each stack entry is a tuple: (dfa, state, node). + # A node is a tuple: (type, value, context, children), + # where children is a list of nodes or None, and context may be None. 
+ newnode = (start, None, None, []) + stackentry = (self._grammar_dfas[start], 0, newnode) + self.stack = [stackentry] + self.rootnode = None + self.used_names = set() # Aliased to self.rootnode.used_names in pop() + + def addtoken(self, type, value, context): + """Add a token; return True iff this is the end of the program.""" + cdef int ilabel, i, t, state, newstate + # Map from token to label + ilabel = self.classify(type, value, context) + # Loop until the token is shifted; may raise exceptions + while True: + dfa, state, node = self.stack[-1] + states, first = dfa + arcs = states[state] + # Look for a state with this label + for i, newstate in arcs: + t, v = self._grammar_labels[i] + if ilabel == i: + # Look it up in the list of labels + ## assert t < 256 + # Shift a token; we're done with it + self.shift(type, value, newstate, context) + # Pop while we are in an accept-only state + state = newstate + while states[state] == [(0, state)]: + self.pop() + if not self.stack: + # Done parsing! + return True + dfa, state, node = self.stack[-1] + states, first = dfa + # Done with this token + return False + elif t >= 256: + # See if it's a symbol and if we're in its first set + itsdfa = self._grammar_dfas[t] + itsstates, itsfirst = itsdfa + if ilabel in itsfirst: + # Push a symbol + self.push(t, itsdfa, newstate, context) + break # To continue the outer while loop + else: + if (0, state) in arcs: + # An accepting state, pop it and try something else + self.pop() + if not self.stack: + # Done parsing, but another token is input + raise ParseError("too much input", + type, value, context) + else: + # No success finding a transition + raise ParseError("bad input", type, value, context) + + cdef int classify(self, type, value, context): + """Turn a token into a label. 
(Internal)""" + if type == NAME: + # Keep a listing of all used names + self.used_names.add(value) + # Check for reserved words + ilabel = self._grammar_keywords.get(value) + if ilabel is not None: + return ilabel + ilabel = self._grammar_tokens.get(type) + if ilabel is None: + raise ParseError("bad token", type, value, context) + return ilabel + + cdef void shift(self, type, value, newstate, context): + """Shift a token. (Internal)""" + dfa, state, node = self.stack[-1] + newnode = (type, value, context, None) + newnode = self.convert(newnode) + if newnode is not None: + node[-1].append(newnode) + self.stack[-1] = (dfa, newstate, node) + + cdef void push(self, type, newdfa, newstate, context): + """Push a nonterminal. (Internal)""" + dfa, state, node = self.stack[-1] + newnode = (type, None, context, []) + self.stack[-1] = (dfa, newstate, node) + self.stack.append((newdfa, 0, newnode)) + + cdef void pop(self): + """Pop a nonterminal. (Internal)""" + popdfa, popstate, popnode = self.stack.pop() + newnode = self.convert(popnode) + if newnode is not None: + if self.stack: + dfa, state, node = self.stack[-1] + node[-1].append(newnode) + else: + self.rootnode = newnode + self.rootnode.used_names = self.used_names + + cdef convert(self, raw_node): + type, value, context, children = raw_node + if children or type in self._grammar_number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. 
+ if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/pytree.py index 46017a959..063e39ecb 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/pytree.py @@ -29,11 +29,6 @@ class Base(object): children = () # Tuple of subnodes was_changed = False - def __new__(cls, *args, **kwds): - """Constructor that prevents Base from being instantiated.""" - assert cls is not Base, "Cannot instantiate Base" - return object.__new__(cls) - def __eq__(self, other): """Compares two nodes for equality. @@ -132,7 +127,7 @@ class Node(Base): """Concrete implementation for interior nodes.""" - def __init__(self, type, children, context=None, prefix=None): + def __init__(self, type, children, context=None): """Initializer. Takes a type constant (a symbol number >= 256), a sequence of @@ -140,14 +135,14 @@ class Node(Base): As a side effect, the parent pointers of the children are updated. """ - assert type >= 256, type + # assert type >= 256, type self.type = type self.children = list(children) for ch in self.children: - assert ch.parent is None, repr(ch) + # assert ch.parent is None, repr(ch) ch.parent = self - if prefix is not None: - self.set_prefix(prefix) + # if prefix is not None: + # self.set_prefix(prefix) def __repr__(self): return "%s(%s, %r)" % (self.__class__.__name__, @@ -206,19 +201,19 @@ class Leaf(Base): lineno = 0 # Line where this token starts in the input column = 0 # Column where this token tarts in the input - def __init__(self, type, value, context=None, prefix=None): + def __init__(self, type, value, context=None): """Initializer. Takes a type constant (a token number < 256), a string value, and an optional context keyword argument. 
""" - assert 0 <= type < 256, type + # assert 0 <= type < 256, type if context is not None: self.prefix, (self.lineno, self.column) = context self.type = type self.value = value - if prefix is not None: - self.prefix = prefix + # if prefix is not None: + # self.prefix = prefix def __repr__(self): return "%s(%r, %r, %r)" % (self.__class__.__name__, From 0edeba7acb187c9f6a229b05c0342c4e693a3b3c Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 01:32:01 +0100 Subject: [PATCH 09/29] Move benchmark into __main__ block. --- sphinx/pycode/__init__.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index dd32d4708..373d4a480 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -160,12 +160,12 @@ class ModuleAnalyzer(object): attr_visitor.visit(self.tree) return attr_visitor.collected - -x0 = time.time() -ma = ModuleAnalyzer.for_module('sphinx.builders.html') -x1 = time.time() -for name, doc in ma.find_attrs(): - print '>>', name - print doc -x2 = time.time() -print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) +if __name__ == '__main__': + x0 = time.time() + ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + x1 = time.time() + for name, doc in ma.find_attrs(): + print '>>', name + print doc + x2 = time.time() + print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) From 83cf7638045a8ed879063707d86005a2e1de2d89 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 02:09:29 +0100 Subject: [PATCH 10/29] First iteration of an autodoc that handles attribute documentation. 
--- sphinx/ext/autodoc.py | 55 +++++++++++++++++++++++++++++---------- sphinx/pycode/__init__.py | 27 ++++++++++++------- 2 files changed, 59 insertions(+), 23 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index f94afc759..ddea1e444 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -22,6 +22,7 @@ from docutils.parsers.rst import directives from docutils.statemachine import ViewList from sphinx.util import rpartition, nested_parse_with_titles +from sphinx.pycode import ModuleAnalyzer, PycodeError clstypes = (type, ClassType) try: @@ -313,7 +314,7 @@ class RstGenerator(object): 'for automodule %s' % name) return (path or '') + base, [], None, None - elif what in ('exception', 'function', 'class'): + elif what in ('exception', 'function', 'class', 'data'): if mod is None: if path: mod = path.rstrip('.') @@ -434,7 +435,9 @@ class RstGenerator(object): modfile = None # e.g. for builtin and C modules for part in objpath: todoc = getattr(todoc, part) - except (ImportError, AttributeError), err: + # also get a source code analyzer for attribute docs + analyzer = ModuleAnalyzer.for_module(mod) + except (ImportError, AttributeError, PycodeError), err: self.warn('autodoc can\'t import/find %s %r, it reported error: "%s", ' 'please check your spelling and sys.path' % (what, str(fullname), err)) @@ -503,6 +506,15 @@ class RstGenerator(object): else: sourcename = 'docstring of %s' % fullname + # add content from attribute documentation + attr_docs = analyzer.find_attr_docs() + if what in ('data', 'attribute'): + key = ('.'.join(objpath[:-1]), objpath[-1]) + if key in attr_docs: + no_docstring = True + for i, line in enumerate(attr_docs[key]): + self.result.append(indent + line, sourcename, i) + # add content from docstrings if not no_docstring: for i, line in enumerate(self.get_doc(what, fullname, todoc)): @@ -524,9 +536,9 @@ class RstGenerator(object): self.env.autodoc_current_class = objpath[0] # add members, if possible - _all = 
members == ['__all__'] + all_members = members == ['__all__'] members_check_module = False - if _all: + if all_members: # unqualified :members: given if what == 'module': if hasattr(todoc, '__all__'): @@ -555,14 +567,28 @@ class RstGenerator(object): else: all_members = [(mname, getattr(todoc, mname)) for mname in members] + # search for members in source code too + namespace = '.'.join(objpath) # will be empty for modules + for (membername, member) in all_members: - if _all and membername.startswith('_'): + # if isattr is True, the member is documented as an attribute + isattr = False + # if content is not None, no extra content from docstrings will be added + content = None + + if all_members and membername.startswith('_'): # ignore members whose name starts with _ by default skip = True else: - # ignore undocumented members if :undoc-members: is not given - doc = getattr(member, '__doc__', None) - skip = not self.options.undoc_members and not doc + if (namespace, membername) in attr_docs: + # keep documented attributes + skip = False + isattr = True + else: + # ignore undocumented members if :undoc-members: is not given + doc = getattr(member, '__doc__', None) + skip = not self.options.undoc_members and not doc + # give the user a chance to decide whether this member should be skipped if self.env.app: # let extensions preprocess docstrings @@ -573,10 +599,11 @@ class RstGenerator(object): if skip: continue - content = None if what == 'module': if isinstance(member, (FunctionType, BuiltinFunctionType)): memberwhat = 'function' + elif isattr: + memberwhat = 'attribute' elif isinstance(member, clstypes): if member.__name__ != membername: # assume it's aliased @@ -588,10 +615,13 @@ class RstGenerator(object): else: memberwhat = 'class' else: - # XXX: todo -- attribute docs continue else: - if isinstance(member, clstypes): + if inspect.isroutine(member): + memberwhat = 'method' + elif isattr: + memberwhat = 'attribute' + elif isinstance(member, clstypes): if 
member.__name__ != membername: # assume it's aliased memberwhat = 'attribute' @@ -599,12 +629,9 @@ class RstGenerator(object): source='') else: memberwhat = 'class' - elif inspect.isroutine(member): - memberwhat = 'method' elif isdescriptor(member): memberwhat = 'attribute' else: - # XXX: todo -- attribute docs continue # give explicitly separated module name, so that members of inner classes # can be documented diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 373d4a480..0d4058bdb 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -43,7 +43,7 @@ def prepare_commentdoc(s): result.append(line[3:]) if result and result[-1]: result.append('') - return '\n'.join(result) + return result _eq = pytree.Leaf(token.EQUAL, '=') @@ -57,7 +57,7 @@ class ClassAttrVisitor(pytree.NodeVisitor): def init(self, scope): self.scope = scope self.namespace = [] - self.collected = [] + self.collected = {} def visit_classdef(self, node): self.namespace.append(node[1].value) @@ -87,9 +87,9 @@ class ClassAttrVisitor(pytree.NodeVisitor): if target.type != token.NAME: # don't care about complex targets continue - name = '.'.join(self.namespace + [target.value]) - if name.startswith(self.scope): - self.collected.append((name, docstring)) + namespace = '.'.join(self.namespace) + if namespace.startswith(self.scope): + self.collected[namespace, target.value] = docstring def visit_funcdef(self, node): # don't descend into functions -- nothing interesting there @@ -105,6 +105,8 @@ class PycodeError(Exception): class ModuleAnalyzer(object): + # cache for analyzer objects + cache = {} def __init__(self, tree, modname, srcname): self.tree = tree @@ -131,6 +133,8 @@ class ModuleAnalyzer(object): @classmethod def for_module(cls, modname): + if modname in cls.cache: + return cls.cache[modname] if modname not in sys.modules: try: __import__(modname) @@ -142,7 +146,9 @@ class ModuleAnalyzer(object): source = mod.__loader__.get_source(modname) except Exception, 
err: raise PycodeError('error getting source for %r' % modname, err) - return cls.for_string(source, modname) + obj = cls.for_string(source, modname) + cls.cache[modname] = obj + return obj filename = getattr(mod, '__file__', None) if filename is None: raise PycodeError('no source found for module %r' % modname) @@ -153,13 +159,16 @@ class ModuleAnalyzer(object): raise PycodeError('source is not a .py file: %r' % filename) if not path.isfile(filename): raise PycodeError('source file is not present: %r' % filename) - return cls.for_file(filename, modname) + obj = cls.for_file(filename, modname) + cls.cache[modname] = obj + return obj - def find_attrs(self): - attr_visitor = ClassAttrVisitor(number2name, '') + def find_attr_docs(self, scope=''): + attr_visitor = ClassAttrVisitor(number2name, scope) attr_visitor.visit(self.tree) return attr_visitor.collected + if __name__ == '__main__': x0 = time.time() ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') From 28829fa9d56bcf569c43be69eb866a449dfbf48a Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 02:26:47 +0100 Subject: [PATCH 11/29] Also find attribute docs in the "other" style: docstrings after the assignment. 
--- sphinx/pycode/__init__.py | 75 +++++++++++++++++++++++++++++++-------- 1 file changed, 61 insertions(+), 14 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 0d4058bdb..f4bd1b1d3 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -14,7 +14,7 @@ import time from os import path from sphinx.pycode import pytree -from sphinx.pycode.pgen2 import driver, token, parse +from sphinx.pycode.pgen2 import driver, token, parse, literals # load the Python grammar @@ -46,13 +46,42 @@ def prepare_commentdoc(s): return result +def prepare_literaldoc(s): + # first, "evaluate" the string + s = literals.evalString(s) + # then, prepare (XXX copied from ext/autodoc) + lines = s.expandtabs().splitlines() + # Find minimum indentation of any non-blank lines after first line. + margin = sys.maxint + for line in lines[1:]: + content = len(line.lstrip()) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation. + if lines: + lines[0] = lines[0].lstrip() + if margin < sys.maxint: + for i in range(1, len(lines)): lines[i] = lines[i][margin:] + # Remove any leading blank lines. + while lines and not lines[0]: + lines.pop(0) + # make sure there is an empty line at the end + if lines and lines[-1]: + lines.append('') + return lines + + _eq = pytree.Leaf(token.EQUAL, '=') -class ClassAttrVisitor(pytree.NodeVisitor): +class AttrDocVisitor(pytree.NodeVisitor): """ - Visitor that collects comments appearing before attribute assignments - on toplevel and in classes. + Visitor that collects docstrings for attribute assignments on toplevel and + in classes. + + The docstrings can either be in special '#:' comments before the assignment + or in a docstring after it. 
""" def init(self, scope): self.scope = scope @@ -65,6 +94,7 @@ class ClassAttrVisitor(pytree.NodeVisitor): self.namespace.pop() def visit_expr_stmt(self, node): + """Visit an assignment which may have a special comment before it.""" if _eq not in node.children: # not an assignment (we don't care for augmented assignments) return @@ -79,8 +109,27 @@ class ClassAttrVisitor(pytree.NodeVisitor): break prefix = pnode.get_prefix() docstring = prepare_commentdoc(prefix) - if not docstring: + if docstring: + self.add_docstring(node, docstring) + + def visit_simple_stmt(self, node): + """Visit a docstring statement which may have an assignment before.""" + if node[0].type != token.STRING: + # not a docstring; but still need to visit children + return self.generic_visit(node) + prev = node.get_prev_sibling() + if not prev: return + if prev.type == sym.simple_stmt and \ + prev[0].type == sym.expr_stmt and _eq in prev[0].children: + docstring = prepare_literaldoc(node[0].value) + self.add_docstring(prev[0], docstring) + + def visit_funcdef(self, node): + # don't descend into functions -- nothing interesting there + return + + def add_docstring(self, node, docstring): # add an item for each assignment target for i in range(0, len(node) - 1, 2): target = node[i] @@ -91,10 +140,6 @@ class ClassAttrVisitor(pytree.NodeVisitor): if namespace.startswith(self.scope): self.collected[namespace, target.value] = docstring - def visit_funcdef(self, node): - # don't descend into functions -- nothing interesting there - return - class PycodeError(Exception): def __str__(self): @@ -164,17 +209,19 @@ class ModuleAnalyzer(object): return obj def find_attr_docs(self, scope=''): - attr_visitor = ClassAttrVisitor(number2name, scope) + attr_visitor = AttrDocVisitor(number2name, scope) attr_visitor.visit(self.tree) return attr_visitor.collected if __name__ == '__main__': x0 = time.time() - ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + #ma = 
ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html') x1 = time.time() - for name, doc in ma.find_attrs(): - print '>>', name - print doc + for (ns, name), doc in ma.find_attr_docs().iteritems(): + print '>>', ns, name + print '\n'.join(doc) x2 = time.time() + #print pytree.nice_repr(ma.tree, number2name) print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) From 0fb89a8e87af982e1f4f2ea897ba3b9d6356bc4d Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 02:37:01 +0100 Subject: [PATCH 12/29] Ignore .so files. --- .hgignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.hgignore b/.hgignore index 375894228..99fc8d87c 100644 --- a/.hgignore +++ b/.hgignore @@ -1,5 +1,6 @@ .*\.pyc .*\.egg +.*\.so build/ dist/ sphinx/pycode/Grammar.*pickle From 08148a42d1aa9f1c7ed32591a60ac4dd021482d7 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 02:37:20 +0100 Subject: [PATCH 13/29] Move docstring processing to an util module. --- sphinx/ext/autodoc.py | 30 +-------------------- sphinx/pycode/__init__.py | 51 +++++------------------------------ sphinx/util/docstrings.py | 56 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 74 deletions(-) create mode 100644 sphinx/util/docstrings.py diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index ddea1e444..55f0d58fe 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -23,6 +23,7 @@ from docutils.statemachine import ViewList from sphinx.util import rpartition, nested_parse_with_titles from sphinx.pycode import ModuleAnalyzer, PycodeError +from sphinx.util.docstrings import prepare_docstring clstypes = (type, ClassType) try: @@ -172,35 +173,6 @@ def isdescriptor(x): return False -def prepare_docstring(s): - """ - Convert a docstring into lines of parseable reST. 
Return it as a list of - lines usable for inserting into a docutils ViewList (used as argument - of nested_parse().) An empty line is added to act as a separator between - this docstring and following content. - """ - lines = s.expandtabs().splitlines() - # Find minimum indentation of any non-blank lines after first line. - margin = sys.maxint - for line in lines[1:]: - content = len(line.lstrip()) - if content: - indent = len(line) - content - margin = min(margin, indent) - # Remove indentation. - if lines: - lines[0] = lines[0].lstrip() - if margin < sys.maxint: - for i in range(1, len(lines)): lines[i] = lines[i][margin:] - # Remove any leading blank lines. - while lines and not lines[0]: - lines.pop(0) - # make sure there is an empty line at the end - if lines and lines[-1]: - lines.append('') - return lines - - def get_module_charset(module): """Return the charset of the given module (cached in _module_charsets).""" if module in _module_charsets: diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index f4bd1b1d3..e52a231dd 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -10,11 +10,11 @@ """ import sys -import time from os import path from sphinx.pycode import pytree from sphinx.pycode.pgen2 import driver, token, parse, literals +from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc # load the Python grammar @@ -31,47 +31,6 @@ number2name = pygrammar.number2symbol.copy() number2name.update(token.tok_name) -def prepare_commentdoc(s): - """ - Extract documentation comment lines (starting with #:) and return them as a - list of lines. Returns an empty list if there is no documentation. 
- """ - result = [] - lines = [line.strip() for line in s.expandtabs().splitlines()] - for line in lines: - if line.startswith('#: '): - result.append(line[3:]) - if result and result[-1]: - result.append('') - return result - - -def prepare_literaldoc(s): - # first, "evaluate" the string - s = literals.evalString(s) - # then, prepare (XXX copied from ext/autodoc) - lines = s.expandtabs().splitlines() - # Find minimum indentation of any non-blank lines after first line. - margin = sys.maxint - for line in lines[1:]: - content = len(line.lstrip()) - if content: - indent = len(line) - content - margin = min(margin, indent) - # Remove indentation. - if lines: - lines[0] = lines[0].lstrip() - if margin < sys.maxint: - for i in range(1, len(lines)): lines[i] = lines[i][margin:] - # Remove any leading blank lines. - while lines and not lines[0]: - lines.pop(0) - # make sure there is an empty line at the end - if lines and lines[-1]: - lines.append('') - return lines - - _eq = pytree.Leaf(token.EQUAL, '=') @@ -122,7 +81,8 @@ class AttrDocVisitor(pytree.NodeVisitor): return if prev.type == sym.simple_stmt and \ prev[0].type == sym.expr_stmt and _eq in prev[0].children: - docstring = prepare_literaldoc(node[0].value) + # need to "eval" the string because it's returned in its original form + docstring = prepare_docstring(literals.evalString(node[0].value)) self.add_docstring(prev[0], docstring) def visit_funcdef(self, node): @@ -215,9 +175,10 @@ class ModuleAnalyzer(object): if __name__ == '__main__': + import time x0 = time.time() - #ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') - ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html') + ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + #ma = ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html') x1 = time.time() for (ns, name), doc in ma.find_attr_docs().iteritems(): print '>>', ns, name diff --git 
a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py new file mode 100644 index 000000000..d7e20e4cc --- /dev/null +++ b/sphinx/util/docstrings.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +""" + sphinx.util.docstrings + ~~~~~~~~~~~~~~~~~~~~~~ + + Utilities for docstring processing. + + :copyright: 2008 by Georg Brandl. + :license: BSD, see LICENSE for details. +""" + +import sys + + +def prepare_docstring(s): + """ + Convert a docstring into lines of parseable reST. Return it as a list of + lines usable for inserting into a docutils ViewList (used as argument + of nested_parse().) An empty line is added to act as a separator between + this docstring and following content. + """ + lines = s.expandtabs().splitlines() + # Find minimum indentation of any non-blank lines after first line. + margin = sys.maxint + for line in lines[1:]: + content = len(line.lstrip()) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation. + if lines: + lines[0] = lines[0].lstrip() + if margin < sys.maxint: + for i in range(1, len(lines)): lines[i] = lines[i][margin:] + # Remove any leading blank lines. + while lines and not lines[0]: + lines.pop(0) + # make sure there is an empty line at the end + if lines and lines[-1]: + lines.append('') + return lines + + +def prepare_commentdoc(s): + """ + Extract documentation comment lines (starting with #:) and return them as a + list of lines. Returns an empty list if there is no documentation. + """ + result = [] + lines = [line.strip() for line in s.expandtabs().splitlines()] + for line in lines: + if line.startswith('#: '): + result.append(line[3:]) + if result and result[-1]: + result.append('') + return result From ed60ae4118fb67266297f16fd22526f089cd4282 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 12:42:26 +0100 Subject: [PATCH 14/29] * Add a tag-finding method based on tokens. * Don't parse immediately if tokenizing suffices. * Also cache by file name. 
--- sphinx/pycode/__init__.py | 139 ++++++++++++++++++++++++++-------- sphinx/pycode/pgen2/driver.py | 18 ++--- 2 files changed, 118 insertions(+), 39 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index e52a231dd..f456cfe65 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -11,9 +11,10 @@ import sys from os import path +from cStringIO import StringIO from sphinx.pycode import pytree -from sphinx.pycode.pgen2 import driver, token, parse, literals +from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc @@ -22,9 +23,12 @@ _grammarfile = path.join(path.dirname(__file__), 'Grammar.txt') pygrammar = driver.load_grammar(_grammarfile) pydriver = driver.Driver(pygrammar, convert=pytree.convert) +# an object with attributes corresponding to token and symbol names class sym: pass for k, v in pygrammar.symbol2number.iteritems(): setattr(sym, k, v) +for k, v in token.tok_name.iteritems(): + setattr(sym, v, k) # a dict mapping terminal and nonterminal numbers to their names number2name = pygrammar.number2symbol.copy() @@ -110,36 +114,29 @@ class PycodeError(Exception): class ModuleAnalyzer(object): - # cache for analyzer objects + # cache for analyzer objects -- caches both by module and file name cache = {} - def __init__(self, tree, modname, srcname): - self.tree = tree - self.modname = modname - self.srcname = srcname - @classmethod def for_string(cls, string, modname, srcname=''): - return cls(pydriver.parse_string(string), modname, srcname) + return cls(StringIO(string), modname, srcname) @classmethod def for_file(cls, filename, modname): + if ('file', filename) in cls.cache: + return cls.cache['file', filename] try: fileobj = open(filename, 'r') except Exception, err: raise PycodeError('error opening %r' % filename, err) - try: - try: - return cls(pydriver.parse_stream(fileobj), modname, filename) - except parse.ParseError, 
err: - raise PycodeError('error parsing %r' % filename, err) - finally: - fileobj.close() + obj = cls(fileobj, modname, filename) + cls.cache['file', filename] = obj + return obj @classmethod def for_module(cls, modname): - if modname in cls.cache: - return cls.cache[modname] + if ('module', modname) in cls.cache: + return cls.cache['module', modname] if modname not in sys.modules: try: __import__(modname) @@ -152,37 +149,119 @@ class ModuleAnalyzer(object): except Exception, err: raise PycodeError('error getting source for %r' % modname, err) obj = cls.for_string(source, modname) - cls.cache[modname] = obj + cls.cache['module', modname] = obj return obj filename = getattr(mod, '__file__', None) if filename is None: raise PycodeError('no source found for module %r' % modname) - if filename.lower().endswith('.pyo') or \ - filename.lower().endswith('.pyc'): + filename = path.normpath(filename) + lfilename = filename.lower() + if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'): filename = filename[:-1] - elif not filename.lower().endswith('.py'): + elif not lfilename.endswith('.py'): raise PycodeError('source is not a .py file: %r' % filename) if not path.isfile(filename): raise PycodeError('source file is not present: %r' % filename) obj = cls.for_file(filename, modname) - cls.cache[modname] = obj + cls.cache['module', modname] = obj return obj + def __init__(self, source, modname, srcname): + self.modname = modname + self.srcname = srcname + # file-like object yielding source lines + self.source = source + + # will be filled by tokenize() + self.tokens = None + # will be filled by parse() + self.parsetree = None + + def tokenize(self): + """Generate tokens from the source.""" + if self.tokens is not None: + return + self.tokens = list(tokenize.generate_tokens(self.source.readline)) + self.source.close() + + def parse(self): + """Parse the generated source tokens.""" + if self.parsetree is not None: + return + self.tokenize() + self.parsetree = 
pydriver.parse_tokens(self.tokens) + def find_attr_docs(self, scope=''): + """Find class and module-level attributes and their documentation.""" + self.parse() attr_visitor = AttrDocVisitor(number2name, scope) - attr_visitor.visit(self.tree) + attr_visitor.visit(self.parsetree) return attr_visitor.collected + def find_tags(self): + """Find class, function and method definitions and their location.""" + self.tokenize() + result = {} + namespace = [] + stack = [] + indent = 0 + defline = False + expect_indent = False + def tokeniter(ignore = (token.COMMENT, token.NL)): + for tokentup in self.tokens: + if tokentup[0] not in ignore: + yield tokentup + tokeniter = tokeniter() + for type, tok, spos, epos, line in tokeniter: + if expect_indent: + if type != token.INDENT: + # no suite -- one-line definition + assert stack + dtype, fullname, startline, _ = stack.pop() + endline = epos[0] + namespace.pop() + result[dtype, fullname] = (startline, endline) + expect_indent = False + if tok in ('def', 'class'): + name = tokeniter.next()[1] + namespace.append(name) + fullname = '.'.join(namespace) + stack.append((tok, fullname, spos[0], indent)) + defline = True + elif type == token.INDENT: + expect_indent = False + indent += 1 + elif type == token.DEDENT: + indent -= 1 + # if the stacklevel is the same as it was before the last def/class block, + # this dedent closes that block + if stack and indent == stack[-1][3]: + dtype, fullname, startline, _ = stack.pop() + endline = spos[0] + namespace.pop() + result[dtype, fullname] = (startline, endline) + elif type == token.NEWLINE: + # if this line contained a definition, expect an INDENT to start the + # suite; if there is no such INDENT it's a one-line definition + if defline: + defline = False + expect_indent = True + return result + if __name__ == '__main__': - import time + import time, pprint x0 = time.time() - ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') #ma = 
ModuleAnalyzer.for_file(__file__.rstrip('c'), 'sphinx.builders.html') + ma = ModuleAnalyzer.for_file('sphinx/builders/html.py', 'sphinx.builders.html') + ma.tokenize() x1 = time.time() - for (ns, name), doc in ma.find_attr_docs().iteritems(): - print '>>', ns, name - print '\n'.join(doc) + ma.parse() x2 = time.time() - #print pytree.nice_repr(ma.tree, number2name) - print "parsing %.4f, finding %.4f" % (x1-x0, x2-x1) + #for (ns, name), doc in ma.find_attr_docs().iteritems(): + # print '>>', ns, name + # print '\n'.join(doc) + pprint.pprint(ma.find_tags()) + x3 = time.time() + #print pytree.nice_repr(ma.parsetree, number2name) + print "tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2) diff --git a/sphinx/pycode/pgen2/driver.py b/sphinx/pycode/pgen2/driver.py index 3e9e10435..edc882fa2 100644 --- a/sphinx/pycode/pgen2/driver.py +++ b/sphinx/pycode/pgen2/driver.py @@ -42,8 +42,8 @@ class Driver(object): column = 0 type = value = start = end = line_text = None prefix = "" - for quintuple in tokens: - type, value, start, end, line_text = quintuple + opmap = grammar.opmap + for type, value, start, end, line_text in tokens: if start != (lineno, column): assert (lineno, column) <= start, ((lineno, column), start) s_lineno, s_column = start @@ -62,13 +62,13 @@ class Driver(object): column = 0 continue if type == token.OP: - type = grammar.opmap[value] - if debug: - self.logger.debug("%s %r (prefix=%r)", - token.tok_name[type], value, prefix) + type = opmap[value] + # if debug: + # self.logger.debug("%s %r (prefix=%r)", + # token.tok_name[type], value, prefix) if p.addtoken(type, value, (prefix, start)): - if debug: - self.logger.debug("Stop.") + # if debug: + # self.logger.debug("Stop.") break prefix = "" lineno, column = end @@ -77,7 +77,7 @@ class Driver(object): column = 0 else: # We never broke out -- EOF is too soon (how can this happen???) 
- raise parse.ParseError("incomplete input", t, v, x) + raise parse.ParseError("incomplete input", type, value, line_text) return p.rootnode def parse_stream_raw(self, stream, debug=False): From ddee927c466689c88847499da579694bf0cc10c9 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 15:41:21 +0100 Subject: [PATCH 15/29] Add "object" option to literalinclude directive. --- sphinx/directives/code.py | 50 +++++++++++++++++++++++++++------------ sphinx/pycode/__init__.py | 4 ++-- 2 files changed, 37 insertions(+), 17 deletions(-) diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index b43645355..7ac9a1c3a 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -68,32 +68,52 @@ def literalinclude_directive(name, arguments, options, content, lineno, lineno - state_machine.input_offset - 1))) fn = path.normpath(path.join(source_dir, rel_fn)) + fromline = toline = None + objectname = options.get('object') + if objectname is not None: + from sphinx.pycode import ModuleAnalyzer + analyzer = ModuleAnalyzer.for_file(fn, '') + tags = analyzer.find_tags() + if objectname not in tags: + return [state.document.reporter.warning( + 'Object named %r not found in include file %r' % + (objectname, arguments[0]), line=lineno)] + else: + fromline = tags[objectname][1] - 1 + toline = tags[objectname][2] - 1 + encoding = options.get('encoding', env.config.source_encoding) try: f = codecs.open(fn, 'r', encoding) - text = f.read() + lines = f.readlines() f.close() except (IOError, OSError): - retnode = state.document.reporter.warning( - 'Include file %r not found or reading it failed' % arguments[0], line=lineno) + return [state.document.reporter.warning( + 'Include file %r not found or reading it failed' % arguments[0], + line=lineno)] except UnicodeError: - retnode = state.document.reporter.warning( + return [state.document.reporter.warning( 'Encoding %r used for reading included file %r seems to ' 'be wrong, try giving an :encoding: 
option' % - (encoding, arguments[0])) - else: - retnode = nodes.literal_block(text, text, source=fn) - retnode.line = 1 - if options.get('language', ''): - retnode['language'] = options['language'] - if 'linenos' in options: - retnode['linenos'] = True - state.document.settings.env.note_dependency(rel_fn) + (encoding, arguments[0]))] + text = ''.join(lines[fromline:toline]) + retnode = nodes.literal_block(text, text, source=fn) + retnode.line = 1 + if options.get('language', ''): + retnode['language'] = options['language'] + if 'linenos' in options: + retnode['linenos'] = True + state.document.settings.env.note_dependency(rel_fn) return [retnode] literalinclude_directive.options = {'linenos': directives.flag, - 'language': directives.unchanged, - 'encoding': directives.encoding} + 'language': directives.unchanged_required, + 'encoding': directives.encoding, + 'object': directives.unchanged_required, + #'lines': directives.unchanged_required, + #'start-after': directives.unchanged_required, + #'end-before': directives.unchanged_required, + } literalinclude_directive.content = 0 literalinclude_directive.arguments = (1, 0, 0) directives.register_directive('literalinclude', literalinclude_directive) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index f456cfe65..a61208d52 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -220,7 +220,7 @@ class ModuleAnalyzer(object): dtype, fullname, startline, _ = stack.pop() endline = epos[0] namespace.pop() - result[dtype, fullname] = (startline, endline) + result[fullname] = (dtype, startline, endline) expect_indent = False if tok in ('def', 'class'): name = tokeniter.next()[1] @@ -239,7 +239,7 @@ class ModuleAnalyzer(object): dtype, fullname, startline, _ = stack.pop() endline = spos[0] namespace.pop() - result[dtype, fullname] = (startline, endline) + result[fullname] = (dtype, startline, endline) elif type == token.NEWLINE: # if this line contained a definition, expect an INDENT to 
start the # suite; if there is no such INDENT it's a one-line definition From c80dc58ac12e7ec585bf88fe7c328ef31a14ddfe Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Tue, 30 Dec 2008 15:45:02 +0100 Subject: [PATCH 16/29] Rename "object" to "pyobject" and document it. --- doc/markup/code.rst | 12 ++++++++++++ sphinx/directives/code.py | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/doc/markup/code.rst b/doc/markup/code.rst index 299ab0bc0..2fd51c84b 100644 --- a/doc/markup/code.rst +++ b/doc/markup/code.rst @@ -113,8 +113,20 @@ Includes .. literalinclude:: example.py :encoding: latin-1 + The directive also supports including only parts of the file. If it is a + Python module, you can select a class, function or method to include using + the ``pyobject`` option:: + + .. literalinclude:: example.py + :pyobject: Timer.start + + This would only include the code lines belonging to the ``start()`` method in + the ``Timer`` class within the file. + .. versionadded:: 0.4.3 The ``encoding`` option. + .. versionadded:: 0.6 + The ``pyobject`` option. .. 
rubric:: Footnotes diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index 7ac9a1c3a..cc74566d1 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -69,7 +69,7 @@ def literalinclude_directive(name, arguments, options, content, lineno, fn = path.normpath(path.join(source_dir, rel_fn)) fromline = toline = None - objectname = options.get('object') + objectname = options.get('pyobject') if objectname is not None: from sphinx.pycode import ModuleAnalyzer analyzer = ModuleAnalyzer.for_file(fn, '') @@ -109,7 +109,7 @@ def literalinclude_directive(name, arguments, options, content, lineno, literalinclude_directive.options = {'linenos': directives.flag, 'language': directives.unchanged_required, 'encoding': directives.encoding, - 'object': directives.unchanged_required, + 'pyobject': directives.unchanged_required, #'lines': directives.unchanged_required, #'start-after': directives.unchanged_required, #'end-before': directives.unchanged_required, From a10c326bdd25f382532770684aec1100adaac6c7 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 1 Jan 2009 23:48:10 +0100 Subject: [PATCH 17/29] Add Python license info, add parse.c source generated by Cython. --- LICENSE | 73 +- sphinx/pycode/__init__.py | 12 +- sphinx/pycode/{pytree.py => nodes.py} | 213 +- sphinx/pycode/pgen2/parse.c | 3261 +++++++++++++++++++++++++ sphinx/pycode/pgen2/parse.pyx | 4 +- 5 files changed, 3396 insertions(+), 167 deletions(-) rename sphinx/pycode/{pytree.py => nodes.py} (52%) create mode 100644 sphinx/pycode/pgen2/parse.c diff --git a/LICENSE b/LICENSE index fd4411703..34183cb5b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,17 +1,19 @@ -Copyright (c) 2007-2008 by the respective authors (see AUTHORS file). -All rights reserved. +Copyright (c) 2007-2008 by the Sphinx team (see AUTHORS file). All +rights reserved. 
+ +License for Sphinx +================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -24,3 +26,58 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Licenses for incorporated software +================================== + +The pgen2 package, included in this distribution under the name +sphinx.pycode.pgen2, is available in the Python 2.6 distribution under +the PSF license agreement for Python: + +1. This LICENSE AGREEMENT is between the Python Software Foundation + ("PSF"), and the Individual or Organization ("Licensee") accessing + and otherwise using Python 2.6 software in source or binary form + and its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF + hereby grants Licensee a nonexclusive, royalty-free, world-wide + license to reproduce, analyze, test, perform and/or display + publicly, prepare derivative works, distribute, and otherwise use + Python 2.6 alone or in any derivative version, provided, however, + that PSF's License Agreement and PSF's notice of copyright, i.e., + "Copyright © 2001-2008 Python Software Foundation; All Rights + Reserved" are retained in Python 2.6 alone or in any derivative + version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on + or incorporates Python 2.6 or any part thereof, and wants to make + the derivative work available to others as provided herein, then + Licensee hereby agrees to include in any such work a brief summary + of the changes made to Python 2.6. + +4. PSF is making Python 2.6 available to Licensee on an "AS IS" basis. + PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY + WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY + REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY + PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 2.6 WILL NOT INFRINGE + ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON + 2.6 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS + AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON + 2.6, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY + THEREOF. + +6. This License Agreement will automatically terminate upon a material + breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any + relationship of agency, partnership, or joint venture between PSF + and Licensee. This License Agreement does not grant permission to + use PSF trademarks or trade name in a trademark sense to endorse or + promote products or services of Licensee, or any third party. + +8. 
By copying, installing or otherwise using Python 2.6, Licensee + agrees to be bound by the terms and conditions of this License + Agreement. diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index a61208d52..d707db2d6 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -5,7 +5,7 @@ Utilities parsing and analyzing Python code. - :copyright: 2008 by Georg Brandl. + :copyright: 2008-2009 by Georg Brandl. :license: BSD, see LICENSE for details. """ @@ -13,7 +13,7 @@ import sys from os import path from cStringIO import StringIO -from sphinx.pycode import pytree +from sphinx.pycode import nodes from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc @@ -21,7 +21,7 @@ from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc # load the Python grammar _grammarfile = path.join(path.dirname(__file__), 'Grammar.txt') pygrammar = driver.load_grammar(_grammarfile) -pydriver = driver.Driver(pygrammar, convert=pytree.convert) +pydriver = driver.Driver(pygrammar, convert=nodes.convert) # an object with attributes corresponding to token and symbol names class sym: pass @@ -35,10 +35,10 @@ number2name = pygrammar.number2symbol.copy() number2name.update(token.tok_name) -_eq = pytree.Leaf(token.EQUAL, '=') +_eq = nodes.Leaf(token.EQUAL, '=') -class AttrDocVisitor(pytree.NodeVisitor): +class AttrDocVisitor(nodes.NodeVisitor): """ Visitor that collects docstrings for attribute assignments on toplevel and in classes. 
@@ -263,5 +263,5 @@ if __name__ == '__main__': # print '\n'.join(doc) pprint.pprint(ma.find_tags()) x3 = time.time() - #print pytree.nice_repr(ma.parsetree, number2name) + #print nodes.nice_repr(ma.parsetree, number2name) print "tokenizing %.4f, parsing %.4f, finding %.4f" % (x1-x0, x2-x1, x3-x2) diff --git a/sphinx/pycode/pytree.py b/sphinx/pycode/nodes.py similarity index 52% rename from sphinx/pycode/pytree.py rename to sphinx/pycode/nodes.py index 063e39ecb..d0fb522bb 100644 --- a/sphinx/pycode/pytree.py +++ b/sphinx/pycode/nodes.py @@ -1,80 +1,47 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. +# -*- coding: utf-8 -*- +""" + sphinx.pycode.nodes + ~~~~~~~~~~~~~~~~~~~ -"""Python parse tree definitions. + Parse tree node implementations. -This is a very concrete parse tree; we need to keep every token and -even the comments and whitespace between tokens. - -Adapted for read-only nodes from pytree.py in Python's 2to3 tool, and -added a few bits. + :copyright: 2009 by Georg Brandl. + :license: BSD, see LICENSE for details. """ -__author__ = "Guido van Rossum " - -class Base(object): - - """Abstract base class for Node and Leaf. - - This provides some default functionality and boilerplate using the - template pattern. - - A node may be a subnode of at most one parent. +class BaseNode(object): + """ + Node superclass for both terminal and nonterminal nodes. """ - # Default values for instance variables - type = None # int: token number (< 256) or symbol number (>= 256) - parent = None # Parent node pointer, or None - children = () # Tuple of subnodes - was_changed = False + def _eq(self, other): + raise NotImplementedError def __eq__(self, other): - """Compares two nodes for equality. - - This calls the method _eq(). - """ if self.__class__ is not other.__class__: return NotImplemented return self._eq(other) def __ne__(self, other): - """Compares two nodes for inequality. - - This calls the method _eq(). 
- """ if self.__class__ is not other.__class__: return NotImplemented return not self._eq(other) - def _eq(self, other): - """Compares two nodes for equality. - - This is called by __eq__ and __ne__. It is only called if the - two nodes have the same type. This must be implemented by the - concrete subclass. Nodes should be considered equal if they - have the same structure, ignoring the prefix string and other - context information. - """ - raise NotImplementedError - - def get_lineno(self): - """Returns the line number which generated the invocant node.""" - node = self - while not isinstance(node, Leaf): - if not node.children: - return - node = node.children[0] - return node.lineno - - def get_next_sibling(self): - """Return the node immediately following the invocant in their - parent's children list. If the invocant does not have a next - sibling, return None.""" + def get_prev_sibling(self): + """Return previous child in parent's children, or None.""" if self.parent is None: return None + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i-1] - # Can't use index(); we need to test by identity + def get_next_sibling(self): + """Return next child in parent's children, or None.""" + if self.parent is None: + return None for i, child in enumerate(self.parent.children): if child is self: try: @@ -82,20 +49,6 @@ class Base(object): except IndexError: return None - def get_prev_sibling(self): - """Return the node immediately preceding the invocant in their - parent's children list. 
If the invocant does not have a previous - sibling, return None.""" - if self.parent is None: - return None - - # Can't use index(); we need to test by identity - for i, child in enumerate(self.parent.children): - if child is self: - if i == 0: - return None - return self.parent.children[i-1] - def get_prev_leaf(self): """Return the leaf node that precedes this node in the parse tree.""" def last_child(node): @@ -114,46 +67,51 @@ class Base(object): return last_child(prev) return self.parent.get_prev_leaf() - def get_suffix(self): - """Return the string immediately following the invocant node. This - is effectively equivalent to node.get_next_sibling().get_prefix()""" - next_sib = self.get_next_sibling() - if next_sib is None: - return "" - return next_sib.get_prefix() + def get_next_leaf(self): + """Return self if leaf, otherwise the leaf node that succeeds this + node in the parse tree. + """ + node = self + while not isinstance(node, Leaf): + assert node.children + node = node.children[0] + return node + + def get_lineno(self): + """Return the line number which generated the invocant node.""" + return self.get_next_leaf().lineno + + def get_prefix(self): + """Return the prefix of the next leaf node.""" + # only leaves carry a prefix + return self.get_next_leaf().prefix -class Node(Base): - - """Concrete implementation for interior nodes.""" +class Node(BaseNode): + """ + Node implementation for nonterminals. + """ def __init__(self, type, children, context=None): - """Initializer. - - Takes a type constant (a symbol number >= 256), a sequence of - child nodes, and an optional context keyword argument. - - As a side effect, the parent pointers of the children are updated. 
- """ + # type of nonterminals is >= 256 # assert type >= 256, type self.type = type self.children = list(children) for ch in self.children: # assert ch.parent is None, repr(ch) ch.parent = self - # if prefix is not None: - # self.set_prefix(prefix) def __repr__(self): - return "%s(%s, %r)" % (self.__class__.__name__, - self.type, self.children) + return '%s(%s, %r)' % (self.__class__.__name__, self.type, self.children) def __str__(self): """This reproduces the input source exactly.""" - return "".join(map(str, self.children)) + return ''.join(map(str, self.children)) - def compact(self): - return ''.join(child.compact() for child in self.children) + def _eq(self, other): + return (self.type, self.children) == (other.type, other.children) + + # support indexing the node directly instead of .children def __getitem__(self, index): return self.children[index] @@ -164,84 +122,35 @@ class Node(Base): def __len__(self): return len(self.children) - def _eq(self, other): - """Compares two nodes for equality.""" - return (self.type, self.children) == (other.type, other.children) - def post_order(self): - """Returns a post-order iterator for the tree.""" - for child in self.children: - for node in child.post_order(): - yield node - yield self - - def pre_order(self): - """Returns a pre-order iterator for the tree.""" - yield self - for child in self.children: - for node in child.post_order(): - yield node - - def get_prefix(self): - """Returns the prefix for the node. - - This passes the call on to the first child. - """ - if not self.children: - return "" - return self.children[0].get_prefix() - - -class Leaf(Base): - - """Concrete implementation for leaf nodes.""" - - # Default values for instance variables - prefix = "" # Whitespace and comments preceding this token in the input +class Leaf(BaseNode): + """ + Node implementation for leaf nodes (terminals). 
+ """ + prefix = '' # Whitespace and comments preceding this token in the input lineno = 0 # Line where this token starts in the input column = 0 # Column where this token tarts in the input def __init__(self, type, value, context=None): - """Initializer. - - Takes a type constant (a token number < 256), a string value, - and an optional context keyword argument. - """ + # type of terminals is below 256 # assert 0 <= type < 256, type - if context is not None: - self.prefix, (self.lineno, self.column) = context self.type = type self.value = value - # if prefix is not None: - # self.prefix = prefix + if context is not None: + self.prefix, (self.lineno, self.column) = context def __repr__(self): - return "%s(%r, %r, %r)" % (self.__class__.__name__, + return '%s(%r, %r, %r)' % (self.__class__.__name__, self.type, self.value, self.prefix) def __str__(self): """This reproduces the input source exactly.""" return self.prefix + str(self.value) - def compact(self): - return str(self.value) - def _eq(self, other): """Compares two nodes for equality.""" return (self.type, self.value) == (other.type, other.value) - def post_order(self): - """Returns a post-order iterator for the tree.""" - yield self - - def pre_order(self): - """Returns a pre-order iterator for the tree.""" - yield self - - def get_prefix(self): - """Returns the prefix for the node.""" - return self.prefix - def convert(grammar, raw_node): """Convert raw node to a Node or Leaf instance.""" diff --git a/sphinx/pycode/pgen2/parse.c b/sphinx/pycode/pgen2/parse.c new file mode 100644 index 000000000..fd0e9ff94 --- /dev/null +++ b/sphinx/pycode/pgen2/parse.c @@ -0,0 +1,3261 @@ +/* Generated by Cython 0.9.8.1 on Thu Jan 1 23:45:38 2009 */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#include "structmember.h" +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#if PY_VERSION_HEX < 0x02040000 + #define METH_COEXIST 0 +#endif +#if PY_VERSION_HEX < 
0x02050000 + typedef int Py_ssize_t; + #define PY_SSIZE_T_MAX INT_MAX + #define PY_SSIZE_T_MIN INT_MIN + #define PyInt_FromSsize_t(z) PyInt_FromLong(z) + #define PyInt_AsSsize_t(o) PyInt_AsLong(o) + #define PyNumber_Index(o) PyNumber_Int(o) + #define PyIndex_Check(o) PyNumber_Check(o) +#endif +#if PY_VERSION_HEX < 0x02060000 + #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) + #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) + #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) + #define PyVarObject_HEAD_INIT(type, size) \ + PyObject_HEAD_INIT(type) size, + #define PyType_Modified(t) + + typedef struct { + void *buf; + Py_ssize_t len; + int readonly; + const char *format; + int ndim; + Py_ssize_t *shape; + Py_ssize_t *strides; + Py_ssize_t *suboffsets; + Py_ssize_t itemsize; + void *internal; + } Py_buffer; + + #define PyBUF_SIMPLE 0 + #define PyBUF_WRITABLE 0x0001 + #define PyBUF_LOCK 0x0002 + #define PyBUF_FORMAT 0x0004 + #define PyBUF_ND 0x0008 + #define PyBUF_STRIDES (0x0010 | PyBUF_ND) + #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) + #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) + #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) + #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) + +#endif +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#endif +#if PY_MAJOR_VERSION >= 3 + #define Py_TPFLAGS_CHECKTYPES 0 + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyString_Type PyBytes_Type + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG 
PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define PyBytes_Type PyString_Type +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyMethod_New(func, self, klass) PyInstanceMethod_New(func) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif +#else + #define _USE_MATH_DEFINES +#endif +#ifdef __cplusplus +#define __PYX_EXTERN_C extern "C" +#else +#define __PYX_EXTERN_C extern +#endif +#include +#define __PYX_HAVE_API__sphinx__pycode__pgen2__parse + + +#ifdef __GNUC__ +#define INLINE __inline__ +#elif _WIN32 +#define INLINE __inline +#else +#define INLINE +#endif + +typedef struct {PyObject **p; char *s; long n; char is_unicode; char intern; char is_identifier;} __Pyx_StringTabEntry; /*proto*/ + + + +static int __pyx_skip_dispatch = 0; + + +/* Type Conversion Predeclarations */ + +#if PY_MAJOR_VERSION < 3 +#define __Pyx_PyBytes_FromString PyString_FromString +#define __Pyx_PyBytes_AsString PyString_AsString +#else +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_AsString PyBytes_AsString +#endif + +#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) +static INLINE int __Pyx_PyObject_IsTrue(PyObject* x); +static INLINE PY_LONG_LONG __pyx_PyInt_AsLongLong(PyObject* x); +static INLINE unsigned PY_LONG_LONG __pyx_PyInt_AsUnsignedLongLong(PyObject* x); +static INLINE Py_ssize_t __pyx_PyIndex_AsSsize_t(PyObject* b); + +#define __pyx_PyInt_AsLong(x) (PyInt_CheckExact(x) ? PyInt_AS_LONG(x) : PyInt_AsLong(x)) +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) + +static INLINE unsigned char __pyx_PyInt_unsigned_char(PyObject* x); +static INLINE unsigned short __pyx_PyInt_unsigned_short(PyObject* x); +static INLINE char __pyx_PyInt_char(PyObject* x); +static INLINE short __pyx_PyInt_short(PyObject* x); +static INLINE int __pyx_PyInt_int(PyObject* x); +static INLINE long __pyx_PyInt_long(PyObject* x); +static INLINE signed char __pyx_PyInt_signed_char(PyObject* x); +static INLINE signed short __pyx_PyInt_signed_short(PyObject* x); +static INLINE signed int __pyx_PyInt_signed_int(PyObject* x); +static INLINE signed long __pyx_PyInt_signed_long(PyObject* x); +static INLINE long double __pyx_PyInt_long_double(PyObject* x); +#ifdef __GNUC__ +/* Test for GCC > 2.95 */ +#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)) +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) +#else /* __GNUC__ > 2 ... */ +#define likely(x) (x) +#define unlikely(x) (x) +#endif /* __GNUC__ > 2 ... 
*/ +#else /* __GNUC__ */ +#define likely(x) (x) +#define unlikely(x) (x) +#endif /* __GNUC__ */ + +static PyObject *__pyx_m; +static PyObject *__pyx_b; +static PyObject *__pyx_empty_tuple; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; +static const char **__pyx_f; + +static INLINE void __Pyx_RaiseArgtupleTooLong(Py_ssize_t num_expected, Py_ssize_t num_found); /*proto*/ + +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/ + +static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/ + +static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, char *modname); /*proto*/ + +static INLINE PyObject *__Pyx_GetItemInt(PyObject *o, Py_ssize_t i, int is_unsigned) { + PyObject *r; + if (PyList_CheckExact(o) && 0 <= i && i < PyList_GET_SIZE(o)) { + r = PyList_GET_ITEM(o, i); + Py_INCREF(r); + } + else if (PyTuple_CheckExact(o) && 0 <= i && i < PyTuple_GET_SIZE(o)) { + r = PyTuple_GET_ITEM(o, i); + Py_INCREF(r); + } + else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0) || !is_unsigned)) + r = PySequence_GetItem(o, i); + else { + PyObject *j = (likely(i >= 0) || !is_unsigned) ? PyInt_FromLong(i) : PyLong_FromUnsignedLongLong((sizeof(unsigned long long) > sizeof(Py_ssize_t) ? 
(1ULL << (sizeof(Py_ssize_t)*8)) : 0) + i); + if (!j) + return 0; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + } + return r; +} + +static PyObject *__Pyx_UnpackItem(PyObject *, Py_ssize_t index); /*proto*/ +static int __Pyx_EndUnpack(PyObject *); /*proto*/ + +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ + +static INLINE PyObject* __Pyx_PyObject_Append(PyObject* L, PyObject* x) { + if (likely(PyList_CheckExact(L))) { + if (PyList_Append(L, x) < 0) return NULL; + Py_INCREF(Py_None); + return Py_None; // this is just to have an accurate signature + } + else { + return PyObject_CallMethod(L, "append", "(O)", x); + } +} + +static INLINE int __Pyx_SetItemInt(PyObject *o, Py_ssize_t i, PyObject *v, int is_unsigned) { + int r; + if (PyList_CheckExact(o) && 0 <= i && i < PyList_GET_SIZE(o)) { + Py_DECREF(PyList_GET_ITEM(o, i)); + Py_INCREF(v); + PyList_SET_ITEM(o, i, v); + return 1; + } + else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_ass_item && (likely(i >= 0) || !is_unsigned)) + r = PySequence_SetItem(o, i, v); + else { + PyObject *j = (likely(i >= 0) || !is_unsigned) ? PyInt_FromLong(i) : PyLong_FromUnsignedLongLong((sizeof(unsigned long long) > sizeof(Py_ssize_t) ? 
(1ULL << (sizeof(Py_ssize_t)*8)) : 0) + i); + if (!j) + return -1; + r = PyObject_SetItem(o, j, v); + Py_DECREF(j); + } + return r; +} + +static void __Pyx_WriteUnraisable(const char *name); /*proto*/ + +static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/ + +static void __Pyx_AddTraceback(const char *funcname); /*proto*/ + +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ + +/* Type declarations */ + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":31 + * + * + * cdef class Parser: # <<<<<<<<<<<<<< + * cdef public grammar, stack, rootnode, used_names + * cdef _grammar_dfas, _grammar_labels, _grammar_keywords, _grammar_tokens + */ + +struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser { + PyObject_HEAD + struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_vtab; + PyObject *grammar; + PyObject *stack; + PyObject *rootnode; + PyObject *used_names; + PyObject *_grammar_dfas; + PyObject *_grammar_labels; + PyObject *_grammar_keywords; + PyObject *_grammar_tokens; + PyObject *_grammar_number2symbol; +}; + + +struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser { + int (*classify)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *); + void (*shift)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *, PyObject *); + void (*push)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *, PyObject *); + void (*pop)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *); + PyObject *(*convert)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *); +}; +static struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser; +/* Module declarations from sphinx.pycode.pgen2.parse */ + +static PyTypeObject *__pyx_ptype_6sphinx_6pycode_5pgen2_5parse_Parser = 0; + + +/* Implementation of sphinx.pycode.pgen2.parse */ +static char 
__pyx_k_2[] = "Exception to signal the parser is stuck."; +static PyObject *__pyx_int_0; +static PyObject *__pyx_int_1; +static char __pyx_k___init__[] = "__init__"; +static PyObject *__pyx_kp___init__; +static char __pyx_k_setup[] = "setup"; +static PyObject *__pyx_kp_setup; +static char __pyx_k_addtoken[] = "addtoken"; +static PyObject *__pyx_kp_addtoken; +static char __pyx_k_1[] = "sphinx.pycode.nodes"; +static PyObject *__pyx_kp_1; +static char __pyx_k_Node[] = "Node"; +static PyObject *__pyx_kp_Node; +static char __pyx_k_Leaf[] = "Leaf"; +static PyObject *__pyx_kp_Leaf; +static char __pyx_k_ParseError[] = "ParseError"; +static PyObject *__pyx_kp_ParseError; +static char __pyx_k_Exception[] = "Exception"; +static PyObject *__pyx_kp_Exception; +static char __pyx_k_msg[] = "msg"; +static PyObject *__pyx_kp_msg; +static char __pyx_k_type[] = "type"; +static PyObject *__pyx_kp_type; +static char __pyx_k_value[] = "value"; +static PyObject *__pyx_kp_value; +static char __pyx_k_context[] = "context"; +static PyObject *__pyx_kp_context; +static char __pyx_k_dfas[] = "dfas"; +static PyObject *__pyx_kp_dfas; +static char __pyx_k_labels[] = "labels"; +static PyObject *__pyx_kp_labels; +static char __pyx_k_keywords[] = "keywords"; +static PyObject *__pyx_kp_keywords; +static char __pyx_k_tokens[] = "tokens"; +static PyObject *__pyx_kp_tokens; +static char __pyx_k_4[] = "number2symbol"; +static PyObject *__pyx_kp_4; +static char __pyx_k_start[] = "start"; +static PyObject *__pyx_kp_start; +static char __pyx_k_add[] = "add"; +static PyObject *__pyx_kp_add; +static char __pyx_k_get[] = "get"; +static PyObject *__pyx_kp_get; +static char __pyx_k_append[] = "append"; +static PyObject *__pyx_kp_append; +static char __pyx_k_pop[] = "pop"; +static PyObject *__pyx_kp_pop; +static char __pyx_k_used_names[] = "used_names"; +static PyObject *__pyx_kp_used_names; +static PyObject *__pyx_kp_2; +static PyObject *__pyx_builtin_Exception; +static PyObject *__pyx_kp_3; +static char 
__pyx_k_3[] = "%s: type=%r, value=%r, context=%r"; +static PyObject *__pyx_kp_5; +static PyObject *__pyx_kp_6; +static char __pyx_k_5[] = "too much input"; +static char __pyx_k_6[] = "bad input"; +static PyObject *__pyx_kp_7; +static char __pyx_k_7[] = "bad token"; + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":22 + * """Exception to signal the parser is stuck.""" + * + * def __init__(self, msg, type, value, context): # <<<<<<<<<<<<<< + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + * (msg, type, value, context)) + */ + +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__ = {"__init__", (PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_msg = 0; + PyObject *__pyx_v_type = 0; + PyObject *__pyx_v_value = 0; + PyObject *__pyx_v_context = 0; + PyObject *__pyx_r; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + static char *__pyx_argnames[] = {"self","msg","type","value","context",0}; + __pyx_self = __pyx_self; + if (likely(!__pyx_kwds) && likely(PyTuple_GET_SIZE(__pyx_args) == 5)) { + __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0); + __pyx_v_msg = PyTuple_GET_ITEM(__pyx_args, 1); + __pyx_v_type = PyTuple_GET_ITEM(__pyx_args, 2); + __pyx_v_value = PyTuple_GET_ITEM(__pyx_args, 3); + __pyx_v_context = PyTuple_GET_ITEM(__pyx_args, 4); + } + else { + if (unlikely(!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OOOOO", __pyx_argnames, &__pyx_v_self, &__pyx_v_msg, &__pyx_v_type, &__pyx_v_value, &__pyx_v_context))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = 
__LINE__; goto __pyx_L3_error;} + } + goto __pyx_L4; + __pyx_L3_error:; + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.ParseError.__init__"); + return NULL; + __pyx_L4:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":23 + * + * def __init__(self, msg, type, value, context): + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % # <<<<<<<<<<<<<< + * (msg, type, value, context)) + * self.msg = msg + */ + __pyx_1 = PyObject_GetAttr(__pyx_builtin_Exception, __pyx_kp___init__); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":24 + * def __init__(self, msg, type, value, context): + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + * (msg, type, value, context)) # <<<<<<<<<<<<<< + * self.msg = msg + * self.type = type + */ + __pyx_2 = PyTuple_New(4); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_msg); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_msg); + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_2, 2, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_2, 3, __pyx_v_context); + __pyx_3 = PyNumber_Remainder(__pyx_kp_3, ((PyObject *)__pyx_2)); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + __pyx_2 = PyTuple_New(2); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_self); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_self); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_3); + __pyx_3 = 0; + __pyx_3 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_2), NULL); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno 
= 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + Py_DECREF(__pyx_3); __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":25 + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + * (msg, type, value, context)) + * self.msg = msg # <<<<<<<<<<<<<< + * self.type = type + * self.value = value + */ + if (PyObject_SetAttr(__pyx_v_self, __pyx_kp_msg, __pyx_v_msg) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":26 + * (msg, type, value, context)) + * self.msg = msg + * self.type = type # <<<<<<<<<<<<<< + * self.value = value + * self.context = context + */ + if (PyObject_SetAttr(__pyx_v_self, __pyx_kp_type, __pyx_v_type) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":27 + * self.msg = msg + * self.type = type + * self.value = value # <<<<<<<<<<<<<< + * self.context = context + * + */ + if (PyObject_SetAttr(__pyx_v_self, __pyx_kp_value, __pyx_v_value) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":28 + * self.type = type + * self.value = value + * self.context = context # <<<<<<<<<<<<<< + * + * + */ + if (PyObject_SetAttr(__pyx_v_self, __pyx_kp_context, __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + __pyx_r = Py_None; Py_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.ParseError.__init__"); + __pyx_r = NULL; + __pyx_L0:; + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":36 + * 
cdef _grammar_number2symbol + * + * def __init__(self, grammar, convert=None): # <<<<<<<<<<<<<< + * self.grammar = grammar + * #self.convert = convert or noconvert + */ + +static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_grammar = 0; + PyObject *__pyx_v_convert = 0; + int __pyx_r; + PyObject *__pyx_1 = 0; + static char *__pyx_argnames[] = {"grammar","convert",0}; + __pyx_v_convert = Py_None; + if (likely(!__pyx_kwds) && likely(1 <= PyTuple_GET_SIZE(__pyx_args)) && likely(PyTuple_GET_SIZE(__pyx_args) <= 2)) { + __pyx_v_grammar = PyTuple_GET_ITEM(__pyx_args, 0); + if (PyTuple_GET_SIZE(__pyx_args) > 1) { + __pyx_v_convert = PyTuple_GET_ITEM(__pyx_args, 1); + } + } + else { + if (unlikely(!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "O|O", __pyx_argnames, &__pyx_v_grammar, &__pyx_v_convert))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 36; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + } + goto __pyx_L4; + __pyx_L3_error:; + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.__init__"); + return -1; + __pyx_L4:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":37 + * + * def __init__(self, grammar, convert=None): + * self.grammar = grammar # <<<<<<<<<<<<<< + * #self.convert = convert or noconvert + * + */ + Py_INCREF(__pyx_v_grammar); + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar = __pyx_v_grammar; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":40 + * #self.convert = convert or noconvert + * + * self._grammar_dfas = grammar.dfas # <<<<<<<<<<<<<< + * self._grammar_labels = grammar.labels + * self._grammar_keywords = grammar.keywords + */ + __pyx_1 = 
PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_dfas); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 40; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":41 + * + * self._grammar_dfas = grammar.dfas + * self._grammar_labels = grammar.labels # <<<<<<<<<<<<<< + * self._grammar_keywords = grammar.keywords + * self._grammar_tokens = grammar.tokens + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_labels); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 41; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":42 + * self._grammar_dfas = grammar.dfas + * self._grammar_labels = grammar.labels + * self._grammar_keywords = grammar.keywords # <<<<<<<<<<<<<< + * self._grammar_tokens = grammar.tokens + * self._grammar_number2symbol = grammar.number2symbol + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_keywords); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 42; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_keywords); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_keywords = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":43 + * self._grammar_labels = grammar.labels + * self._grammar_keywords = grammar.keywords + * self._grammar_tokens = grammar.tokens # <<<<<<<<<<<<<< + * 
self._grammar_number2symbol = grammar.number2symbol + * + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_tokens); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_tokens); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_tokens = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":44 + * self._grammar_keywords = grammar.keywords + * self._grammar_tokens = grammar.tokens + * self._grammar_number2symbol = grammar.number2symbol # <<<<<<<<<<<<<< + * + * def setup(self, start=None): + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_kp_4); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 44; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_number2symbol); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_number2symbol = __pyx_1; + __pyx_1 = 0; + + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.__init__"); + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":46 + * self._grammar_number2symbol = grammar.number2symbol + * + * def setup(self, start=None): # <<<<<<<<<<<<<< + * if start is None: + * start = self.grammar.start + */ + +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_start = 0; + PyObject *__pyx_v_newnode; + PyObject *__pyx_v_stackentry; + PyObject *__pyx_r; + int __pyx_1; + 
PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + static char *__pyx_argnames[] = {"start",0}; + __pyx_v_start = Py_None; + if (likely(!__pyx_kwds) && likely(0 <= PyTuple_GET_SIZE(__pyx_args)) && likely(PyTuple_GET_SIZE(__pyx_args) <= 1)) { + if (PyTuple_GET_SIZE(__pyx_args) > 0) { + __pyx_v_start = PyTuple_GET_ITEM(__pyx_args, 0); + } + } + else { + if (unlikely(!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "|O", __pyx_argnames, &__pyx_v_start))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L3_error;} + } + goto __pyx_L4; + __pyx_L3_error:; + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.setup"); + return NULL; + __pyx_L4:; + Py_INCREF(__pyx_v_start); + __pyx_v_newnode = Py_None; Py_INCREF(Py_None); + __pyx_v_stackentry = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":47 + * + * def setup(self, start=None): + * if start is None: # <<<<<<<<<<<<<< + * start = self.grammar.start + * # Each stack entry is a tuple: (dfa, state, node). + */ + __pyx_1 = (__pyx_v_start == Py_None); + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":48 + * def setup(self, start=None): + * if start is None: + * start = self.grammar.start # <<<<<<<<<<<<<< + * # Each stack entry is a tuple: (dfa, state, node). + * # A node is a tuple: (type, value, context, children), + */ + __pyx_2 = PyObject_GetAttr(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar, __pyx_kp_start); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_start); + __pyx_v_start = __pyx_2; + __pyx_2 = 0; + goto __pyx_L5; + } + __pyx_L5:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":52 + * # A node is a tuple: (type, value, context, children), + * # where children is a list of nodes or None, and context may be None. 
+ * newnode = (start, None, None, []) # <<<<<<<<<<<<<< + * stackentry = (self._grammar_dfas[start], 0, newnode) + * self.stack = [stackentry] + */ + __pyx_2 = PyList_New(0); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(4); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_start); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_start); + Py_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_3, 1, Py_None); + Py_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_3, 2, Py_None); + PyTuple_SET_ITEM(__pyx_3, 3, ((PyObject *)__pyx_2)); + __pyx_2 = 0; + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = ((PyObject *)__pyx_3); + __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":53 + * # where children is a list of nodes or None, and context may be None. + * newnode = (start, None, None, []) + * stackentry = (self._grammar_dfas[start], 0, newnode) # <<<<<<<<<<<<<< + * self.stack = [stackentry] + * self.rootnode = None + */ + __pyx_2 = PyObject_GetItem(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas, __pyx_v_start); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(3); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 53; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_2); + Py_INCREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_3, 1, __pyx_int_0); + Py_INCREF(__pyx_v_newnode); + PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_newnode); + __pyx_2 = 0; + Py_DECREF(__pyx_v_stackentry); + __pyx_v_stackentry = ((PyObject *)__pyx_3); + __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":54 + * newnode = (start, None, None, []) + * stackentry = (self._grammar_dfas[start], 0, newnode) + * self.stack = [stackentry] # 
<<<<<<<<<<<<<< + * self.rootnode = None + * self.used_names = set() # Aliased to self.rootnode.used_names in pop() + */ + __pyx_2 = PyList_New(1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_stackentry); + PyList_SET_ITEM(__pyx_2, 0, __pyx_v_stackentry); + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack = ((PyObject *)__pyx_2); + __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":55 + * stackentry = (self._grammar_dfas[start], 0, newnode) + * self.stack = [stackentry] + * self.rootnode = None # <<<<<<<<<<<<<< + * self.used_names = set() # Aliased to self.rootnode.used_names in pop() + * + */ + Py_INCREF(Py_None); + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->rootnode); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->rootnode = Py_None; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":56 + * self.stack = [stackentry] + * self.rootnode = None + * self.used_names = set() # Aliased to self.rootnode.used_names in pop() # <<<<<<<<<<<<<< + * + * def addtoken(self, type, value, context): + */ + __pyx_3 = PyObject_Call(((PyObject*)&PySet_Type), ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names); + ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names = __pyx_3; + __pyx_3 = 0; + + __pyx_r = Py_None; Py_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.setup"); + __pyx_r = NULL; + __pyx_L0:; + Py_DECREF(__pyx_v_newnode); + 
Py_DECREF(__pyx_v_stackentry); + Py_DECREF(__pyx_v_start); + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":58 + * self.used_names = set() # Aliased to self.rootnode.used_names in pop() + * + * def addtoken(self, type, value, context): # <<<<<<<<<<<<<< + * """Add a token; return True iff this is the end of the program.""" + * cdef int ilabel, i, t, state, newstate + */ + +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken[] = "Add a token; return True iff this is the end of the program."; +static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_type = 0; + PyObject *__pyx_v_value = 0; + PyObject *__pyx_v_context = 0; + int __pyx_v_ilabel; + int __pyx_v_i; + int __pyx_v_t; + int __pyx_v_state; + int __pyx_v_newstate; + PyObject *__pyx_v_dfa; + PyObject *__pyx_v_node; + PyObject *__pyx_v_states; + PyObject *__pyx_v_first; + PyObject *__pyx_v_arcs; + PyObject *__pyx_v_v; + PyObject *__pyx_v_itsdfa; + PyObject *__pyx_v_itsstates; + PyObject *__pyx_v_itsfirst; + PyObject *__pyx_r; + int __pyx_1; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + PyObject *__pyx_4 = 0; + int __pyx_5; + Py_ssize_t __pyx_6 = 0; + PyObject *__pyx_7 = 0; + int __pyx_8; + static char *__pyx_argnames[] = {"type","value","context",0}; + if (likely(!__pyx_kwds) && likely(PyTuple_GET_SIZE(__pyx_args) == 3)) { + __pyx_v_type = PyTuple_GET_ITEM(__pyx_args, 0); + __pyx_v_value = PyTuple_GET_ITEM(__pyx_args, 1); + __pyx_v_context = PyTuple_GET_ITEM(__pyx_args, 2); + } + else { + if (unlikely(!PyArg_ParseTupleAndKeywords(__pyx_args, __pyx_kwds, "OOO", __pyx_argnames, &__pyx_v_type, &__pyx_v_value, &__pyx_v_context))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto 
__pyx_L3_error;} + } + goto __pyx_L4; + __pyx_L3_error:; + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.addtoken"); + return NULL; + __pyx_L4:; + __pyx_v_dfa = Py_None; Py_INCREF(Py_None); + __pyx_v_node = Py_None; Py_INCREF(Py_None); + __pyx_v_states = Py_None; Py_INCREF(Py_None); + __pyx_v_first = Py_None; Py_INCREF(Py_None); + __pyx_v_arcs = Py_None; Py_INCREF(Py_None); + __pyx_v_v = Py_None; Py_INCREF(Py_None); + __pyx_v_itsdfa = Py_None; Py_INCREF(Py_None); + __pyx_v_itsstates = Py_None; Py_INCREF(Py_None); + __pyx_v_itsfirst = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":62 + * cdef int ilabel, i, t, state, newstate + * # Map from token to label + * ilabel = self.classify(type, value, context) # <<<<<<<<<<<<<< + * # Loop until the token is shifted; may raise exceptions + * while True: + */ + __pyx_v_ilabel = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->classify(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self), __pyx_v_type, __pyx_v_value, __pyx_v_context); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":64 + * ilabel = self.classify(type, value, context) + * # Loop until the token is shifted; may raise exceptions + * while True: # <<<<<<<<<<<<<< + * dfa, state, node = self.stack[-1] + * states, first = dfa + */ + while (1) { + __pyx_1 = 1; + if (!__pyx_1) break; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":65 + * # Loop until the token is shifted; may raise exceptions + * while True: + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * states, first = dfa + * arcs = states[state] + */ + __pyx_2 = __Pyx_GetItemInt(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack, -1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_2) 
&& PyTuple_GET_SIZE(__pyx_2) == 3) { + PyObject* tuple = __pyx_2; + __pyx_4 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_4); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_4; + __pyx_4 = 0; + __pyx_4 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_4); + __pyx_5 = __pyx_PyInt_int(__pyx_4); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + __pyx_v_state = __pyx_5; + __pyx_4 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_4); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_4; + __pyx_4 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + else { + __pyx_3 = PyObject_GetIter(__pyx_2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_4 = __Pyx_UnpackItem(__pyx_3, 0); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_4; + __pyx_4 = 0; + __pyx_4 = __Pyx_UnpackItem(__pyx_3, 1); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_4); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + __pyx_v_state = __pyx_5; + __pyx_4 = __Pyx_UnpackItem(__pyx_3, 2); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_4; + __pyx_4 = 0; + if (__Pyx_EndUnpack(__pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 65; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_3); __pyx_3 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":66 + * 
while True: + * dfa, state, node = self.stack[-1] + * states, first = dfa # <<<<<<<<<<<<<< + * arcs = states[state] + * # Look for a state with this label + */ + if (PyTuple_CheckExact(__pyx_v_dfa) && PyTuple_GET_SIZE(__pyx_v_dfa) == 2) { + PyObject* tuple = __pyx_v_dfa; + __pyx_2 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_states); + __pyx_v_states = __pyx_2; + __pyx_2 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_first); + __pyx_v_first = __pyx_3; + __pyx_3 = 0; + } + else { + __pyx_4 = PyObject_GetIter(__pyx_v_dfa); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_UnpackItem(__pyx_4, 0); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_states); + __pyx_v_states = __pyx_2; + __pyx_2 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_4, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_first); + __pyx_v_first = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":67 + * dfa, state, node = self.stack[-1] + * states, first = dfa + * arcs = states[state] # <<<<<<<<<<<<<< + * # Look for a state with this label + * for i, newstate in arcs: + */ + __pyx_2 = __Pyx_GetItemInt(__pyx_v_states, __pyx_v_state, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 67; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_arcs); + __pyx_v_arcs = __pyx_2; + __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":69 + * arcs = states[state] + * # Look for a state with this label + * for i, newstate in arcs: # 
<<<<<<<<<<<<<< + * t, v = self._grammar_labels[i] + * if ilabel == i: + */ + if (PyList_CheckExact(__pyx_v_arcs) || PyTuple_CheckExact(__pyx_v_arcs)) { + __pyx_6 = 0; __pyx_3 = __pyx_v_arcs; Py_INCREF(__pyx_3); + } else { + __pyx_6 = -1; __pyx_3 = PyObject_GetIter(__pyx_v_arcs); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + } + for (;;) { + if (likely(PyList_CheckExact(__pyx_3))) { + if (__pyx_6 >= PyList_GET_SIZE(__pyx_3)) break; + __pyx_4 = PyList_GET_ITEM(__pyx_3, __pyx_6); Py_INCREF(__pyx_4); __pyx_6++; + } else if (likely(PyTuple_CheckExact(__pyx_3))) { + if (__pyx_6 >= PyTuple_GET_SIZE(__pyx_3)) break; + __pyx_4 = PyTuple_GET_ITEM(__pyx_3, __pyx_6); Py_INCREF(__pyx_4); __pyx_6++; + } else { + __pyx_4 = PyIter_Next(__pyx_3); + if (!__pyx_4) { + if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + break; + } + } + if (PyTuple_CheckExact(__pyx_4) && PyTuple_GET_SIZE(__pyx_4) == 2) { + PyObject* tuple = __pyx_4; + __pyx_7 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_7); + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_i = __pyx_5; + __pyx_7 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_7); + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_newstate = __pyx_5; + Py_DECREF(__pyx_4); __pyx_4 = 0; + } + else { + __pyx_2 = PyObject_GetIter(__pyx_4); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 0); if 
(unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_i = __pyx_5; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 1); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_newstate = __pyx_5; + if (__Pyx_EndUnpack(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 69; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":70 + * # Look for a state with this label + * for i, newstate in arcs: + * t, v = self._grammar_labels[i] # <<<<<<<<<<<<<< + * if ilabel == i: + * # Look it up in the list of labels + */ + __pyx_7 = __Pyx_GetItemInt(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels, __pyx_v_i, 0); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_7) && PyTuple_GET_SIZE(__pyx_7) == 2) { + PyObject* tuple = __pyx_7; + __pyx_2 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_2); + __pyx_5 = __pyx_PyInt_int(__pyx_2); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_v_t = __pyx_5; + __pyx_2 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_v); + __pyx_v_v = __pyx_2; + __pyx_2 = 0; + 
Py_DECREF(__pyx_7); __pyx_7 = 0; + } + else { + __pyx_4 = PyObject_GetIter(__pyx_7); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_4, 0); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_2); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_v_t = __pyx_5; + __pyx_2 = __Pyx_UnpackItem(__pyx_4, 1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_v); + __pyx_v_v = __pyx_2; + __pyx_2 = 0; + if (__Pyx_EndUnpack(__pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":71 + * for i, newstate in arcs: + * t, v = self._grammar_labels[i] + * if ilabel == i: # <<<<<<<<<<<<<< + * # Look it up in the list of labels + * ## assert t < 256 + */ + __pyx_1 = (__pyx_v_ilabel == __pyx_v_i); + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":75 + * ## assert t < 256 + * # Shift a token; we're done with it + * self.shift(type, value, newstate, context) # <<<<<<<<<<<<<< + * # Pop while we are in an accept-only state + * state = newstate + */ + __pyx_2 = PyInt_FromLong(__pyx_v_newstate); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->shift(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser 
*)__pyx_v_self), __pyx_v_type, __pyx_v_value, __pyx_2, __pyx_v_context); + Py_DECREF(__pyx_2); __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":77 + * self.shift(type, value, newstate, context) + * # Pop while we are in an accept-only state + * state = newstate # <<<<<<<<<<<<<< + * while states[state] == [(0, state)]: + * self.pop() + */ + __pyx_v_state = __pyx_v_newstate; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":78 + * # Pop while we are in an accept-only state + * state = newstate + * while states[state] == [(0, state)]: # <<<<<<<<<<<<<< + * self.pop() + * if not self.stack: + */ + while (1) { + __pyx_7 = __Pyx_GetItemInt(__pyx_v_states, __pyx_v_state, 0); if (!__pyx_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = PyInt_FromLong(__pyx_v_state); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = PyTuple_New(2); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_int_0); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4); + __pyx_4 = 0; + __pyx_4 = PyList_New(1); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + PyList_SET_ITEM(__pyx_4, 0, ((PyObject *)__pyx_2)); + __pyx_2 = 0; + __pyx_2 = PyObject_RichCompare(__pyx_7, ((PyObject *)__pyx_4), Py_EQ); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + Py_DECREF(((PyObject *)__pyx_4)); __pyx_4 = 0; + __pyx_1 = __Pyx_PyObject_IsTrue(__pyx_2); if (unlikely(__pyx_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + if (!__pyx_1) break; + + /* 
"/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":79 + * state = newstate + * while states[state] == [(0, state)]: + * self.pop() # <<<<<<<<<<<<<< + * if not self.stack: + * # Done parsing! + */ + ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->pop(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":80 + * while states[state] == [(0, state)]: + * self.pop() + * if not self.stack: # <<<<<<<<<<<<<< + * # Done parsing! + * return True + */ + __pyx_1 = __Pyx_PyObject_IsTrue(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack); if (unlikely(__pyx_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 80; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_8 = (!__pyx_1); + if (__pyx_8) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":82 + * if not self.stack: + * # Done parsing! + * return True # <<<<<<<<<<<<<< + * dfa, state, node = self.stack[-1] + * states, first = dfa + */ + __pyx_7 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 82; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_7; + __pyx_7 = 0; + Py_DECREF(__pyx_3); __pyx_3 = 0; + goto __pyx_L0; + goto __pyx_L12; + } + __pyx_L12:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":83 + * # Done parsing! 
+ * return True + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * states, first = dfa + * # Done with this token + */ + __pyx_4 = __Pyx_GetItemInt(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack, -1, 0); if (!__pyx_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_4) && PyTuple_GET_SIZE(__pyx_4) == 3) { + PyObject* tuple = __pyx_4; + __pyx_7 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_7); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_7; + __pyx_7 = 0; + __pyx_7 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_7); + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_state = __pyx_5; + __pyx_7 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_7); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_7; + __pyx_7 = 0; + Py_DECREF(__pyx_4); __pyx_4 = 0; + } + else { + __pyx_2 = PyObject_GetIter(__pyx_4); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 0); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_7; + __pyx_7 = 0; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 1); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_5 = __pyx_PyInt_int(__pyx_7); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + __pyx_v_state = __pyx_5; + __pyx_7 = __Pyx_UnpackItem(__pyx_2, 2); if (unlikely(!__pyx_7)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_7; + __pyx_7 = 0; + if (__Pyx_EndUnpack(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":84 + * return True + * dfa, state, node = self.stack[-1] + * states, first = dfa # <<<<<<<<<<<<<< + * # Done with this token + * return False + */ + if (PyTuple_CheckExact(__pyx_v_dfa) && PyTuple_GET_SIZE(__pyx_v_dfa) == 2) { + PyObject* tuple = __pyx_v_dfa; + __pyx_4 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_4); + Py_DECREF(__pyx_v_states); + __pyx_v_states = __pyx_4; + __pyx_4 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_first); + __pyx_v_first = __pyx_2; + __pyx_2 = 0; + } + else { + __pyx_7 = PyObject_GetIter(__pyx_v_dfa); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = __Pyx_UnpackItem(__pyx_7, 0); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_states); + __pyx_v_states = __pyx_4; + __pyx_4 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_7, 1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_first); + __pyx_v_first = __pyx_2; + __pyx_2 = 0; + if (__Pyx_EndUnpack(__pyx_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 84; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + } + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":86 + * states, first = dfa + * # Done with this token + * return False # <<<<<<<<<<<<<< + * elif t >= 256: + * # See if it's a symbol and if we're in its first set + */ + __pyx_4 = __Pyx_PyBool_FromLong(0); if 
(unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_4; + __pyx_4 = 0; + Py_DECREF(__pyx_3); __pyx_3 = 0; + goto __pyx_L0; + goto __pyx_L9; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":87 + * # Done with this token + * return False + * elif t >= 256: # <<<<<<<<<<<<<< + * # See if it's a symbol and if we're in its first set + * itsdfa = self._grammar_dfas[t] + */ + __pyx_1 = (__pyx_v_t >= 256); + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":89 + * elif t >= 256: + * # See if it's a symbol and if we're in its first set + * itsdfa = self._grammar_dfas[t] # <<<<<<<<<<<<<< + * itsstates, itsfirst = itsdfa + * if ilabel in itsfirst: + */ + __pyx_2 = __Pyx_GetItemInt(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas, __pyx_v_t, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 89; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_itsdfa); + __pyx_v_itsdfa = __pyx_2; + __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":90 + * # See if it's a symbol and if we're in its first set + * itsdfa = self._grammar_dfas[t] + * itsstates, itsfirst = itsdfa # <<<<<<<<<<<<<< + * if ilabel in itsfirst: + * # Push a symbol + */ + if (PyTuple_CheckExact(__pyx_v_itsdfa) && PyTuple_GET_SIZE(__pyx_v_itsdfa) == 2) { + PyObject* tuple = __pyx_v_itsdfa; + __pyx_4 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_4); + Py_DECREF(__pyx_v_itsstates); + __pyx_v_itsstates = __pyx_4; + __pyx_4 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_itsfirst); + __pyx_v_itsfirst = __pyx_2; + __pyx_2 = 0; + } + else { + __pyx_7 = PyObject_GetIter(__pyx_v_itsdfa); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = __Pyx_UnpackItem(__pyx_7, 0); if (unlikely(!__pyx_4)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_itsstates); + __pyx_v_itsstates = __pyx_4; + __pyx_4 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_7, 1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_itsfirst); + __pyx_v_itsfirst = __pyx_2; + __pyx_2 = 0; + if (__Pyx_EndUnpack(__pyx_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":91 + * itsdfa = self._grammar_dfas[t] + * itsstates, itsfirst = itsdfa + * if ilabel in itsfirst: # <<<<<<<<<<<<<< + * # Push a symbol + * self.push(t, itsdfa, newstate, context) + */ + __pyx_4 = PyInt_FromLong(__pyx_v_ilabel); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_8 = (PySequence_Contains(__pyx_v_itsfirst, __pyx_4)); if (unlikely(__pyx_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + if (__pyx_8) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":93 + * if ilabel in itsfirst: + * # Push a symbol + * self.push(t, itsdfa, newstate, context) # <<<<<<<<<<<<<< + * break # To continue the outer while loop + * else: + */ + __pyx_2 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_7 = PyInt_FromLong(__pyx_v_newstate); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 93; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->push(((struct 
__pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self), __pyx_2, __pyx_v_itsdfa, __pyx_7, __pyx_v_context); + Py_DECREF(__pyx_2); __pyx_2 = 0; + Py_DECREF(__pyx_7); __pyx_7 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":94 + * # Push a symbol + * self.push(t, itsdfa, newstate, context) + * break # To continue the outer while loop # <<<<<<<<<<<<<< + * else: + * if (0, state) in arcs: + */ + goto __pyx_L8; + goto __pyx_L13; + } + __pyx_L13:; + goto __pyx_L9; + } + __pyx_L9:; + } + /*else*/ { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":96 + * break # To continue the outer while loop + * else: + * if (0, state) in arcs: # <<<<<<<<<<<<<< + * # An accepting state, pop it and try something else + * self.pop() + */ + __pyx_4 = PyInt_FromLong(__pyx_v_state); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = PyTuple_New(2); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_int_0); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_4); + __pyx_4 = 0; + __pyx_1 = (PySequence_Contains(__pyx_v_arcs, ((PyObject *)__pyx_2))); if (unlikely(__pyx_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":98 + * if (0, state) in arcs: + * # An accepting state, pop it and try something else + * self.pop() # <<<<<<<<<<<<<< + * if not self.stack: + * # Done parsing, but another token is input + */ + ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->pop(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)); + + /* 
"/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":99 + * # An accepting state, pop it and try something else + * self.pop() + * if not self.stack: # <<<<<<<<<<<<<< + * # Done parsing, but another token is input + * raise ParseError("too much input", + */ + __pyx_8 = __Pyx_PyObject_IsTrue(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack); if (unlikely(__pyx_8 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = (!__pyx_8); + if (__pyx_1) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":101 + * if not self.stack: + * # Done parsing, but another token is input + * raise ParseError("too much input", # <<<<<<<<<<<<<< + * type, value, context) + * else: + */ + __pyx_7 = __Pyx_GetName(__pyx_m, __pyx_kp_ParseError); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":102 + * # Done parsing, but another token is input + * raise ParseError("too much input", + * type, value, context) # <<<<<<<<<<<<<< + * else: + * # No success finding a transition + */ + __pyx_4 = PyTuple_New(4); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_kp_5); + PyTuple_SET_ITEM(__pyx_4, 0, __pyx_kp_5); + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_4, 1, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_4, 2, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_4, 3, __pyx_v_context); + __pyx_2 = PyObject_Call(__pyx_7, ((PyObject *)__pyx_4), NULL); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + Py_DECREF(((PyObject *)__pyx_4)); __pyx_4 = 0; + __Pyx_Raise(__pyx_2, 0, 0); + Py_DECREF(__pyx_2); __pyx_2 = 0; + {__pyx_filename 
= __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + goto __pyx_L15; + } + __pyx_L15:; + goto __pyx_L14; + } + /*else*/ { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":105 + * else: + * # No success finding a transition + * raise ParseError("bad input", type, value, context) # <<<<<<<<<<<<<< + * + * cdef int classify(self, type, value, context): + */ + __pyx_7 = __Pyx_GetName(__pyx_m, __pyx_kp_ParseError); if (unlikely(!__pyx_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = PyTuple_New(4); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_kp_6); + PyTuple_SET_ITEM(__pyx_4, 0, __pyx_kp_6); + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_4, 1, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_4, 2, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_4, 3, __pyx_v_context); + __pyx_2 = PyObject_Call(__pyx_7, ((PyObject *)__pyx_4), NULL); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_7); __pyx_7 = 0; + Py_DECREF(((PyObject *)__pyx_4)); __pyx_4 = 0; + __Pyx_Raise(__pyx_2, 0, 0); + Py_DECREF(__pyx_2); __pyx_2 = 0; + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 105; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + } + __pyx_L14:; + } + __pyx_L8:; + Py_DECREF(__pyx_3); __pyx_3 = 0; + } + + __pyx_r = Py_None; Py_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + Py_XDECREF(__pyx_4); + Py_XDECREF(__pyx_7); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.addtoken"); + __pyx_r = NULL; + __pyx_L0:; + Py_DECREF(__pyx_v_dfa); + Py_DECREF(__pyx_v_node); + Py_DECREF(__pyx_v_states); + Py_DECREF(__pyx_v_first); + Py_DECREF(__pyx_v_arcs); + Py_DECREF(__pyx_v_v); + Py_DECREF(__pyx_v_itsdfa); + 
Py_DECREF(__pyx_v_itsstates); + Py_DECREF(__pyx_v_itsfirst); + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":107 + * raise ParseError("bad input", type, value, context) + * + * cdef int classify(self, type, value, context): # <<<<<<<<<<<<<< + * """Turn a token into a label. (Internal)""" + * if type == NAME: + */ + +static int __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_classify(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_type, PyObject *__pyx_v_value, PyObject *__pyx_v_context) { + PyObject *__pyx_v_ilabel; + int __pyx_r; + PyObject *__pyx_1 = 0; + int __pyx_2; + PyObject *__pyx_3 = 0; + PyObject *__pyx_4 = 0; + int __pyx_5; + __pyx_v_ilabel = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":109 + * cdef int classify(self, type, value, context): + * """Turn a token into a label. (Internal)""" + * if type == NAME: # <<<<<<<<<<<<<< + * # Keep a listing of all used names + * self.used_names.add(value) + */ + __pyx_1 = PyObject_RichCompare(__pyx_v_type, __pyx_int_1, Py_EQ); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_PyObject_IsTrue(__pyx_1); if (unlikely(__pyx_2 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + if (__pyx_2) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":111 + * if type == NAME: + * # Keep a listing of all used names + * self.used_names.add(value) # <<<<<<<<<<<<<< + * # Check for reserved words + * ilabel = self._grammar_keywords.get(value) + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_self->used_names, __pyx_kp_add); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_value); + __pyx_4 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_3), NULL); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + Py_DECREF(__pyx_4); __pyx_4 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":113 + * self.used_names.add(value) + * # Check for reserved words + * ilabel = self._grammar_keywords.get(value) # <<<<<<<<<<<<<< + * if ilabel is not None: + * return ilabel + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_self->_grammar_keywords, __pyx_kp_get); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_value); + __pyx_4 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_3), NULL); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + Py_DECREF(__pyx_v_ilabel); + __pyx_v_ilabel = __pyx_4; + __pyx_4 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":114 + * # Check for reserved words + * ilabel = self._grammar_keywords.get(value) + * if ilabel is not None: # <<<<<<<<<<<<<< + * return ilabel + * ilabel = self._grammar_tokens.get(type) + */ + __pyx_2 = (__pyx_v_ilabel != Py_None); + if (__pyx_2) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":115 + * ilabel = self._grammar_keywords.get(value) + * if ilabel is not None: + * return ilabel # <<<<<<<<<<<<<< + * ilabel = self._grammar_tokens.get(type) + * if ilabel is None: + 
*/ + __pyx_5 = __pyx_PyInt_int(__pyx_v_ilabel); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_5; + goto __pyx_L0; + goto __pyx_L4; + } + __pyx_L4:; + goto __pyx_L3; + } + __pyx_L3:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":116 + * if ilabel is not None: + * return ilabel + * ilabel = self._grammar_tokens.get(type) # <<<<<<<<<<<<<< + * if ilabel is None: + * raise ParseError("bad token", type, value, context) + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_self->_grammar_tokens, __pyx_kp_get); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_type); + __pyx_4 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_3), NULL); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + Py_DECREF(__pyx_v_ilabel); + __pyx_v_ilabel = __pyx_4; + __pyx_4 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":117 + * return ilabel + * ilabel = self._grammar_tokens.get(type) + * if ilabel is None: # <<<<<<<<<<<<<< + * raise ParseError("bad token", type, value, context) + * return ilabel + */ + __pyx_2 = (__pyx_v_ilabel == Py_None); + if (__pyx_2) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":118 + * ilabel = self._grammar_tokens.get(type) + * if ilabel is None: + * raise ParseError("bad token", type, value, context) # <<<<<<<<<<<<<< + * return ilabel + * + */ + __pyx_1 = __Pyx_GetName(__pyx_m, __pyx_kp_ParseError); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = PyTuple_New(4); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_kp_7); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_kp_7); + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_3, 1, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_3, 3, __pyx_v_context); + __pyx_4 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_3), NULL); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + __Pyx_Raise(__pyx_4, 0, 0); + Py_DECREF(__pyx_4); __pyx_4 = 0; + {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + goto __pyx_L5; + } + __pyx_L5:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":119 + * if ilabel is None: + * raise ParseError("bad token", type, value, context) + * return ilabel # <<<<<<<<<<<<<< + * + * cdef void shift(self, type, value, newstate, context): + */ + __pyx_5 = __pyx_PyInt_int(__pyx_v_ilabel); if (unlikely((__pyx_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_5; + goto __pyx_L0; + + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_3); + Py_XDECREF(__pyx_4); + __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.classify"); + __pyx_r = 0; + __pyx_L0:; + Py_DECREF(__pyx_v_ilabel); + return __pyx_r; +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":121 + * return ilabel + * + * cdef void shift(self, type, value, newstate, context): # <<<<<<<<<<<<<< + * """Shift a token. 
(Internal)""" + * dfa, state, node = self.stack[-1] + */ + +static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_shift(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_type, PyObject *__pyx_v_value, PyObject *__pyx_v_newstate, PyObject *__pyx_v_context) { + PyObject *__pyx_v_dfa; + PyObject *__pyx_v_state; + PyObject *__pyx_v_node; + PyObject *__pyx_v_newnode; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + int __pyx_4; + __pyx_v_dfa = Py_None; Py_INCREF(Py_None); + __pyx_v_state = Py_None; Py_INCREF(Py_None); + __pyx_v_node = Py_None; Py_INCREF(Py_None); + __pyx_v_newnode = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":123 + * cdef void shift(self, type, value, newstate, context): + * """Shift a token. (Internal)""" + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * newnode = (type, value, context, None) + * newnode = self.convert(newnode) + */ + __pyx_1 = __Pyx_GetItemInt(__pyx_v_self->stack, -1, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_1) && PyTuple_GET_SIZE(__pyx_1) == 3) { + PyObject* tuple = __pyx_1; + __pyx_3 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + else { + __pyx_2 = PyObject_GetIter(__pyx_1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 
123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":124 + * """Shift a token. (Internal)""" + * dfa, state, node = self.stack[-1] + * newnode = (type, value, context, None) # <<<<<<<<<<<<<< + * newnode = self.convert(newnode) + * if newnode is not None: + */ + __pyx_3 = PyTuple_New(4); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_3, 1, __pyx_v_value); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_context); + Py_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_3, 3, Py_None); + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = ((PyObject *)__pyx_3); + __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":125 + * dfa, state, node = self.stack[-1] + * newnode = (type, value, context, None) + * newnode = self.convert(newnode) # <<<<<<<<<<<<<< + * if newnode is not None: + * node[-1].append(newnode) + */ + __pyx_1 = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self->__pyx_vtab)->convert(__pyx_v_self, __pyx_v_newnode); if (unlikely(!__pyx_1)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = __pyx_1; + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":126 + * newnode = (type, value, context, None) + * newnode = self.convert(newnode) + * if newnode is not None: # <<<<<<<<<<<<<< + * node[-1].append(newnode) + * self.stack[-1] = (dfa, newstate, node) + */ + __pyx_4 = (__pyx_v_newnode != Py_None); + if (__pyx_4) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":127 + * newnode = self.convert(newnode) + * if newnode is not None: + * node[-1].append(newnode) # <<<<<<<<<<<<<< + * self.stack[-1] = (dfa, newstate, node) + * + */ + __pyx_2 = __Pyx_GetItemInt(__pyx_v_node, -1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = __Pyx_PyObject_Append(__pyx_2, __pyx_v_newnode); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + Py_DECREF(__pyx_3); __pyx_3 = 0; + goto __pyx_L3; + } + __pyx_L3:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":128 + * if newnode is not None: + * node[-1].append(newnode) + * self.stack[-1] = (dfa, newstate, node) # <<<<<<<<<<<<<< + * + * cdef void push(self, type, newdfa, newstate, context): + */ + __pyx_1 = PyTuple_New(3); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_dfa); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_dfa); + Py_INCREF(__pyx_v_newstate); + PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_newstate); + Py_INCREF(__pyx_v_node); + PyTuple_SET_ITEM(__pyx_1, 2, __pyx_v_node); + if (__Pyx_SetItemInt(__pyx_v_self->stack, -1, ((PyObject *)__pyx_1), 0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 128; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + 
Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.shift"); + __pyx_L0:; + Py_DECREF(__pyx_v_dfa); + Py_DECREF(__pyx_v_state); + Py_DECREF(__pyx_v_node); + Py_DECREF(__pyx_v_newnode); +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":130 + * self.stack[-1] = (dfa, newstate, node) + * + * cdef void push(self, type, newdfa, newstate, context): # <<<<<<<<<<<<<< + * """Push a nonterminal. (Internal)""" + * dfa, state, node = self.stack[-1] + */ + +static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_push(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_type, PyObject *__pyx_v_newdfa, PyObject *__pyx_v_newstate, PyObject *__pyx_v_context) { + PyObject *__pyx_v_dfa; + PyObject *__pyx_v_state; + PyObject *__pyx_v_node; + PyObject *__pyx_v_newnode; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + __pyx_v_dfa = Py_None; Py_INCREF(Py_None); + __pyx_v_state = Py_None; Py_INCREF(Py_None); + __pyx_v_node = Py_None; Py_INCREF(Py_None); + __pyx_v_newnode = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":132 + * cdef void push(self, type, newdfa, newstate, context): + * """Push a nonterminal. 
(Internal)""" + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * newnode = (type, None, context, []) + * self.stack[-1] = (dfa, newstate, node) + */ + __pyx_1 = __Pyx_GetItemInt(__pyx_v_self->stack, -1, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_1) && PyTuple_GET_SIZE(__pyx_1) == 3) { + PyObject* tuple = __pyx_1; + __pyx_3 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + else { + __pyx_2 = PyObject_GetIter(__pyx_1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_2, 2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":133 + * 
"""Push a nonterminal. (Internal)""" + * dfa, state, node = self.stack[-1] + * newnode = (type, None, context, []) # <<<<<<<<<<<<<< + * self.stack[-1] = (dfa, newstate, node) + * self.stack.append((newdfa, 0, newnode)) + */ + __pyx_3 = PyList_New(0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = PyTuple_New(4); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_type); + Py_INCREF(Py_None); + PyTuple_SET_ITEM(__pyx_1, 1, Py_None); + Py_INCREF(__pyx_v_context); + PyTuple_SET_ITEM(__pyx_1, 2, __pyx_v_context); + PyTuple_SET_ITEM(__pyx_1, 3, ((PyObject *)__pyx_3)); + __pyx_3 = 0; + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = ((PyObject *)__pyx_1); + __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":134 + * dfa, state, node = self.stack[-1] + * newnode = (type, None, context, []) + * self.stack[-1] = (dfa, newstate, node) # <<<<<<<<<<<<<< + * self.stack.append((newdfa, 0, newnode)) + * + */ + __pyx_2 = PyTuple_New(3); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_dfa); + PyTuple_SET_ITEM(__pyx_2, 0, __pyx_v_dfa); + Py_INCREF(__pyx_v_newstate); + PyTuple_SET_ITEM(__pyx_2, 1, __pyx_v_newstate); + Py_INCREF(__pyx_v_node); + PyTuple_SET_ITEM(__pyx_2, 2, __pyx_v_node); + if (__Pyx_SetItemInt(__pyx_v_self->stack, -1, ((PyObject *)__pyx_2), 0) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":135 + * newnode = (type, None, context, []) + * self.stack[-1] = (dfa, newstate, node) + * self.stack.append((newdfa, 0, newnode)) # <<<<<<<<<<<<<< + * + * cdef void pop(self): + */ + 
__pyx_3 = PyTuple_New(3); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_newdfa); + PyTuple_SET_ITEM(__pyx_3, 0, __pyx_v_newdfa); + Py_INCREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_3, 1, __pyx_int_0); + Py_INCREF(__pyx_v_newnode); + PyTuple_SET_ITEM(__pyx_3, 2, __pyx_v_newnode); + __pyx_1 = __Pyx_PyObject_Append(__pyx_v_self->stack, ((PyObject *)__pyx_3)); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_3)); __pyx_3 = 0; + Py_DECREF(__pyx_1); __pyx_1 = 0; + + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.push"); + __pyx_L0:; + Py_DECREF(__pyx_v_dfa); + Py_DECREF(__pyx_v_state); + Py_DECREF(__pyx_v_node); + Py_DECREF(__pyx_v_newnode); +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":137 + * self.stack.append((newdfa, 0, newnode)) + * + * cdef void pop(self): # <<<<<<<<<<<<<< + * """Pop a nonterminal. 
(Internal)""" + * popdfa, popstate, popnode = self.stack.pop() + */ + +static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_pop(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self) { + PyObject *__pyx_v_popdfa; + PyObject *__pyx_v_popstate; + PyObject *__pyx_v_popnode; + PyObject *__pyx_v_newnode; + PyObject *__pyx_v_dfa; + PyObject *__pyx_v_state; + PyObject *__pyx_v_node; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + int __pyx_4; + __pyx_v_popdfa = Py_None; Py_INCREF(Py_None); + __pyx_v_popstate = Py_None; Py_INCREF(Py_None); + __pyx_v_popnode = Py_None; Py_INCREF(Py_None); + __pyx_v_newnode = Py_None; Py_INCREF(Py_None); + __pyx_v_dfa = Py_None; Py_INCREF(Py_None); + __pyx_v_state = Py_None; Py_INCREF(Py_None); + __pyx_v_node = Py_None; Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":139 + * cdef void pop(self): + * """Pop a nonterminal. (Internal)""" + * popdfa, popstate, popnode = self.stack.pop() # <<<<<<<<<<<<<< + * newnode = self.convert(popnode) + * if newnode is not None: + */ + __pyx_1 = PyObject_GetAttr(__pyx_v_self->stack, __pyx_kp_pop); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = PyObject_Call(__pyx_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + if (PyTuple_CheckExact(__pyx_2) && PyTuple_GET_SIZE(__pyx_2) == 3) { + PyObject* tuple = __pyx_2; + __pyx_3 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_popdfa); + __pyx_v_popdfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_popstate); + __pyx_v_popstate = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_popnode); + __pyx_v_popnode = __pyx_3; 
+ __pyx_3 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + else { + __pyx_1 = PyObject_GetIter(__pyx_2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_popdfa); + __pyx_v_popdfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_popstate); + __pyx_v_popstate = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_popnode); + __pyx_v_popnode = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":140 + * """Pop a nonterminal. 
(Internal)""" + * popdfa, popstate, popnode = self.stack.pop() + * newnode = self.convert(popnode) # <<<<<<<<<<<<<< + * if newnode is not None: + * if self.stack: + */ + __pyx_3 = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self->__pyx_vtab)->convert(__pyx_v_self, __pyx_v_popnode); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_newnode); + __pyx_v_newnode = __pyx_3; + __pyx_3 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":141 + * popdfa, popstate, popnode = self.stack.pop() + * newnode = self.convert(popnode) + * if newnode is not None: # <<<<<<<<<<<<<< + * if self.stack: + * dfa, state, node = self.stack[-1] + */ + __pyx_4 = (__pyx_v_newnode != Py_None); + if (__pyx_4) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":142 + * newnode = self.convert(popnode) + * if newnode is not None: + * if self.stack: # <<<<<<<<<<<<<< + * dfa, state, node = self.stack[-1] + * node[-1].append(newnode) + */ + __pyx_4 = __Pyx_PyObject_IsTrue(__pyx_v_self->stack); if (unlikely(__pyx_4 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__pyx_4) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":143 + * if newnode is not None: + * if self.stack: + * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<< + * node[-1].append(newnode) + * else: + */ + __pyx_2 = __Pyx_GetItemInt(__pyx_v_self->stack, -1, 0); if (!__pyx_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyTuple_CheckExact(__pyx_2) && PyTuple_GET_SIZE(__pyx_2) == 3) { + PyObject* tuple = __pyx_2; + __pyx_3 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 
0; + __pyx_3 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_3); + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + } + else { + __pyx_1 = PyObject_GetIter(__pyx_2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 0); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_dfa); + __pyx_v_dfa = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 1); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_state); + __pyx_v_state = __pyx_3; + __pyx_3 = 0; + __pyx_3 = __Pyx_UnpackItem(__pyx_1, 2); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_node); + __pyx_v_node = __pyx_3; + __pyx_3 = 0; + if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 143; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":144 + * if self.stack: + * dfa, state, node = self.stack[-1] + * node[-1].append(newnode) # <<<<<<<<<<<<<< + * else: + * self.rootnode = newnode + */ + __pyx_3 = __Pyx_GetItemInt(__pyx_v_node, -1, 0); if (!__pyx_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_PyObject_Append(__pyx_3, __pyx_v_newnode); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 144; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_3); __pyx_3 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + goto __pyx_L4; + } + /*else*/ { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":146 + * 
node[-1].append(newnode) + * else: + * self.rootnode = newnode # <<<<<<<<<<<<<< + * self.rootnode.used_names = self.used_names + * + */ + Py_INCREF(__pyx_v_newnode); + Py_DECREF(__pyx_v_self->rootnode); + __pyx_v_self->rootnode = __pyx_v_newnode; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":147 + * else: + * self.rootnode = newnode + * self.rootnode.used_names = self.used_names # <<<<<<<<<<<<<< + * + * cdef convert(self, raw_node): + */ + if (PyObject_SetAttr(__pyx_v_self->rootnode, __pyx_kp_used_names, __pyx_v_self->used_names) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + } + __pyx_L4:; + goto __pyx_L3; + } + __pyx_L3:; + + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.pop"); + __pyx_L0:; + Py_DECREF(__pyx_v_popdfa); + Py_DECREF(__pyx_v_popstate); + Py_DECREF(__pyx_v_popnode); + Py_DECREF(__pyx_v_newnode); + Py_DECREF(__pyx_v_dfa); + Py_DECREF(__pyx_v_state); + Py_DECREF(__pyx_v_node); +} + +/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":149 + * self.rootnode.used_names = self.used_names + * + * cdef convert(self, raw_node): # <<<<<<<<<<<<<< + * type, value, context, children = raw_node + * if children or type in self._grammar_number2symbol: + */ + +static PyObject *__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_convert(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_raw_node) { + PyObject *__pyx_v_type; + PyObject *__pyx_v_value; + PyObject *__pyx_v_context; + PyObject *__pyx_v_children; + PyObject *__pyx_r; + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + int __pyx_3; + Py_ssize_t __pyx_4 = 0; + PyObject *__pyx_5 = 0; + PyObject *__pyx_6 = 0; + __pyx_v_type = Py_None; Py_INCREF(Py_None); + __pyx_v_value = Py_None; Py_INCREF(Py_None); + __pyx_v_context = Py_None; Py_INCREF(Py_None); + __pyx_v_children = Py_None; 
Py_INCREF(Py_None); + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":150 + * + * cdef convert(self, raw_node): + * type, value, context, children = raw_node # <<<<<<<<<<<<<< + * if children or type in self._grammar_number2symbol: + * # If there's exactly one child, return that child instead of + */ + if (PyTuple_CheckExact(__pyx_v_raw_node) && PyTuple_GET_SIZE(__pyx_v_raw_node) == 4) { + PyObject* tuple = __pyx_v_raw_node; + __pyx_2 = PyTuple_GET_ITEM(tuple, 0); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_type); + __pyx_v_type = __pyx_2; + __pyx_2 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 1); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_value); + __pyx_v_value = __pyx_2; + __pyx_2 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 2); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_context); + __pyx_v_context = __pyx_2; + __pyx_2 = 0; + __pyx_2 = PyTuple_GET_ITEM(tuple, 3); + Py_INCREF(__pyx_2); + Py_DECREF(__pyx_v_children); + __pyx_v_children = __pyx_2; + __pyx_2 = 0; + } + else { + __pyx_1 = PyObject_GetIter(__pyx_v_raw_node); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_UnpackItem(__pyx_1, 0); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_type); + __pyx_v_type = __pyx_2; + __pyx_2 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_1, 1); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_value); + __pyx_v_value = __pyx_2; + __pyx_2 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_1, 2); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_context); + __pyx_v_context = __pyx_2; + __pyx_2 = 0; + __pyx_2 = __Pyx_UnpackItem(__pyx_1, 3); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_v_children); + __pyx_v_children = __pyx_2; + __pyx_2 = 0; + if (__Pyx_EndUnpack(__pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + } + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":151 + * cdef convert(self, raw_node): + * type, value, context, children = raw_node + * if children or type in self._grammar_number2symbol: # <<<<<<<<<<<<<< + * # If there's exactly one child, return that child instead of + * # creating a new node. + */ + __pyx_2 = __pyx_v_children; + Py_INCREF(__pyx_2); + __pyx_3 = __Pyx_PyObject_IsTrue(__pyx_2); if (unlikely(__pyx_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (!__pyx_3) { + Py_DECREF(__pyx_2); __pyx_2 = 0; + __pyx_3 = (PySequence_Contains(__pyx_v_self->_grammar_number2symbol, __pyx_v_type)); if (unlikely(__pyx_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_2 = __Pyx_PyBool_FromLong(__pyx_3); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + } + __pyx_3 = __Pyx_PyObject_IsTrue(__pyx_2); if (unlikely(__pyx_3 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + if (__pyx_3) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":154 + * # If there's exactly one child, return that child instead of + * # creating a new node. 
+ * if len(children) == 1: # <<<<<<<<<<<<<< + * return children[0] + * return Node(type, children, context=context) + */ + __pyx_4 = PyObject_Length(__pyx_v_children); if (unlikely(__pyx_4 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = (__pyx_4 == 1); + if (__pyx_3) { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":155 + * # creating a new node. + * if len(children) == 1: + * return children[0] # <<<<<<<<<<<<<< + * return Node(type, children, context=context) + * else: + */ + __pyx_1 = __Pyx_GetItemInt(__pyx_v_children, 0, 0); if (!__pyx_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 155; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_r = __pyx_1; + __pyx_1 = 0; + goto __pyx_L0; + goto __pyx_L4; + } + __pyx_L4:; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":156 + * if len(children) == 1: + * return children[0] + * return Node(type, children, context=context) # <<<<<<<<<<<<<< + * else: + * return Leaf(type, value, context=context) + */ + __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_kp_Node); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = PyTuple_New(2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_type); + Py_INCREF(__pyx_v_children); + PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_children); + __pyx_5 = PyDict_New(); if (unlikely(!__pyx_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyDict_SetItem(__pyx_5, __pyx_kp_context, __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_6 = PyEval_CallObjectWithKeywords(__pyx_2, ((PyObject *)__pyx_1), ((PyObject *)__pyx_5)); if (unlikely(!__pyx_6)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_5)); __pyx_5 = 0; + __pyx_r = __pyx_6; + __pyx_6 = 0; + goto __pyx_L0; + goto __pyx_L3; + } + /*else*/ { + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":158 + * return Node(type, children, context=context) + * else: + * return Leaf(type, value, context=context) # <<<<<<<<<<<<<< + */ + __pyx_2 = __Pyx_GetName(__pyx_m, __pyx_kp_Leaf); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = PyTuple_New(2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_v_type); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_v_type); + Py_INCREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_1, 1, __pyx_v_value); + __pyx_5 = PyDict_New(); if (unlikely(!__pyx_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyDict_SetItem(__pyx_5, __pyx_kp_context, __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_6 = PyEval_CallObjectWithKeywords(__pyx_2, ((PyObject *)__pyx_1), ((PyObject *)__pyx_5)); if (unlikely(!__pyx_6)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_2); __pyx_2 = 0; + Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + Py_DECREF(((PyObject *)__pyx_5)); __pyx_5 = 0; + __pyx_r = __pyx_6; + __pyx_6 = 0; + goto __pyx_L0; + } + __pyx_L3:; + + __pyx_r = Py_None; Py_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_5); + Py_XDECREF(__pyx_6); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.convert"); + __pyx_r = 0; + __pyx_L0:; + Py_DECREF(__pyx_v_type); + 
Py_DECREF(__pyx_v_value); + Py_DECREF(__pyx_v_context); + Py_DECREF(__pyx_v_children); + return __pyx_r; +} +static struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser __pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser; + +static PyObject *__pyx_tp_new_6sphinx_6pycode_5pgen2_5parse_Parser(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p; + PyObject *o = (*t->tp_alloc)(t, 0); + if (!o) return 0; + p = ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o); + p->__pyx_vtab = __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser; + p->grammar = Py_None; Py_INCREF(Py_None); + p->stack = Py_None; Py_INCREF(Py_None); + p->rootnode = Py_None; Py_INCREF(Py_None); + p->used_names = Py_None; Py_INCREF(Py_None); + p->_grammar_dfas = Py_None; Py_INCREF(Py_None); + p->_grammar_labels = Py_None; Py_INCREF(Py_None); + p->_grammar_keywords = Py_None; Py_INCREF(Py_None); + p->_grammar_tokens = Py_None; Py_INCREF(Py_None); + p->_grammar_number2symbol = Py_None; Py_INCREF(Py_None); + return o; +} + +static void __pyx_tp_dealloc_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o) { + struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o; + Py_XDECREF(p->grammar); + Py_XDECREF(p->stack); + Py_XDECREF(p->rootnode); + Py_XDECREF(p->used_names); + Py_XDECREF(p->_grammar_dfas); + Py_XDECREF(p->_grammar_labels); + Py_XDECREF(p->_grammar_keywords); + Py_XDECREF(p->_grammar_tokens); + Py_XDECREF(p->_grammar_number2symbol); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o; + if (p->grammar) { + e = (*v)(p->grammar, a); if (e) return e; + } + if (p->stack) { + e = (*v)(p->stack, a); if (e) return e; + } + if (p->rootnode) { + e = (*v)(p->rootnode, a); if 
(e) return e; + } + if (p->used_names) { + e = (*v)(p->used_names, a); if (e) return e; + } + if (p->_grammar_dfas) { + e = (*v)(p->_grammar_dfas, a); if (e) return e; + } + if (p->_grammar_labels) { + e = (*v)(p->_grammar_labels, a); if (e) return e; + } + if (p->_grammar_keywords) { + e = (*v)(p->_grammar_keywords, a); if (e) return e; + } + if (p->_grammar_tokens) { + e = (*v)(p->_grammar_tokens, a); if (e) return e; + } + if (p->_grammar_number2symbol) { + e = (*v)(p->_grammar_number2symbol, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o) { + struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o; + PyObject* tmp; + tmp = ((PyObject*)p->grammar); + p->grammar = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->stack); + p->stack = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->rootnode); + p->rootnode = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->used_names); + p->used_names = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_dfas); + p->_grammar_dfas = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_labels); + p->_grammar_labels = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_keywords); + p->_grammar_keywords = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_tokens); + p->_grammar_tokens = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_grammar_number2symbol); + p->_grammar_number2symbol = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static struct PyMethodDef __pyx_methods_6sphinx_6pycode_5pgen2_5parse_Parser[] = { + {"setup", (PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup, METH_VARARGS|METH_KEYWORDS, 0}, + {"addtoken", 
(PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken}, + {0, 0, 0, 0} +}; + +static struct PyMemberDef __pyx_members_6sphinx_6pycode_5pgen2_5parse_Parser[] = { + {"grammar", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, grammar), 0, 0}, + {"stack", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, stack), 0, 0}, + {"rootnode", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, rootnode), 0, 0}, + {"used_names", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, used_names), 0, 0}, + {0, 0, 0, 0, 0} +}; + +static PyNumberMethods __pyx_tp_as_number_Parser = { + 0, /*nb_add*/ + 0, /*nb_subtract*/ + 0, /*nb_multiply*/ + #if PY_MAJOR_VERSION < 3 + 0, /*nb_divide*/ + #endif + 0, /*nb_remainder*/ + 0, /*nb_divmod*/ + 0, /*nb_power*/ + 0, /*nb_negative*/ + 0, /*nb_positive*/ + 0, /*nb_absolute*/ + 0, /*nb_nonzero*/ + 0, /*nb_invert*/ + 0, /*nb_lshift*/ + 0, /*nb_rshift*/ + 0, /*nb_and*/ + 0, /*nb_xor*/ + 0, /*nb_or*/ + #if PY_MAJOR_VERSION < 3 + 0, /*nb_coerce*/ + #endif + 0, /*nb_int*/ + 0, /*nb_long*/ + 0, /*nb_float*/ + #if PY_MAJOR_VERSION < 3 + 0, /*nb_oct*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*nb_hex*/ + #endif + 0, /*nb_inplace_add*/ + 0, /*nb_inplace_subtract*/ + 0, /*nb_inplace_multiply*/ + #if PY_MAJOR_VERSION < 3 + 0, /*nb_inplace_divide*/ + #endif + 0, /*nb_inplace_remainder*/ + 0, /*nb_inplace_power*/ + 0, /*nb_inplace_lshift*/ + 0, /*nb_inplace_rshift*/ + 0, /*nb_inplace_and*/ + 0, /*nb_inplace_xor*/ + 0, /*nb_inplace_or*/ + 0, /*nb_floor_divide*/ + 0, /*nb_true_divide*/ + 0, /*nb_inplace_floor_divide*/ + 0, /*nb_inplace_true_divide*/ + #if (PY_MAJOR_VERSION >= 3) || (Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX) + 0, /*nb_index*/ + #endif +}; + +static PySequenceMethods __pyx_tp_as_sequence_Parser = { + 0, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ 
+ 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_Parser = { + 0, /*mp_length*/ + 0, /*mp_subscript*/ + 0, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_Parser = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + #if (PY_MAJOR_VERSION >= 3) || (Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_NEWBUFFER) + 0, /*bf_getbuffer*/ + #endif + #if (PY_MAJOR_VERSION >= 3) || (Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_NEWBUFFER) + 0, /*bf_releasebuffer*/ + #endif +}; + +PyTypeObject __pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser = { + PyVarObject_HEAD_INIT(0, 0) + "sphinx.pycode.pgen2.parse.Parser", /*tp_name*/ + sizeof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + &__pyx_tp_as_number_Parser, /*tp_as_number*/ + &__pyx_tp_as_sequence_Parser, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_Parser, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_Parser, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_traverse*/ + __pyx_tp_clear_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_methods*/ + __pyx_members_6sphinx_6pycode_5pgen2_5parse_Parser, 
/*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ +}; + +static struct PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +static void __pyx_init_filenames(void); /*proto*/ + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "parse", + 0, /* m_doc */ + -1, /* m_size */ + __pyx_methods /* m_methods */, + NULL, /* m_reload */ + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp___init__, __pyx_k___init__, sizeof(__pyx_k___init__), 0, 1, 1}, + {&__pyx_kp_setup, __pyx_k_setup, sizeof(__pyx_k_setup), 0, 1, 1}, + {&__pyx_kp_addtoken, __pyx_k_addtoken, sizeof(__pyx_k_addtoken), 0, 1, 1}, + {&__pyx_kp_1, __pyx_k_1, sizeof(__pyx_k_1), 1, 1, 1}, + {&__pyx_kp_Node, __pyx_k_Node, sizeof(__pyx_k_Node), 1, 1, 1}, + {&__pyx_kp_Leaf, __pyx_k_Leaf, sizeof(__pyx_k_Leaf), 1, 1, 1}, + {&__pyx_kp_ParseError, __pyx_k_ParseError, sizeof(__pyx_k_ParseError), 0, 1, 1}, + {&__pyx_kp_Exception, __pyx_k_Exception, sizeof(__pyx_k_Exception), 1, 1, 1}, + {&__pyx_kp_msg, __pyx_k_msg, sizeof(__pyx_k_msg), 1, 1, 1}, + {&__pyx_kp_type, __pyx_k_type, sizeof(__pyx_k_type), 1, 1, 1}, + {&__pyx_kp_value, __pyx_k_value, sizeof(__pyx_k_value), 1, 1, 1}, + {&__pyx_kp_context, __pyx_k_context, sizeof(__pyx_k_context), 1, 1, 1}, + {&__pyx_kp_dfas, __pyx_k_dfas, sizeof(__pyx_k_dfas), 1, 1, 1}, + {&__pyx_kp_labels, __pyx_k_labels, sizeof(__pyx_k_labels), 1, 1, 1}, + {&__pyx_kp_keywords, __pyx_k_keywords, sizeof(__pyx_k_keywords), 1, 1, 1}, + {&__pyx_kp_tokens, __pyx_k_tokens, sizeof(__pyx_k_tokens), 1, 
1, 1}, + {&__pyx_kp_4, __pyx_k_4, sizeof(__pyx_k_4), 1, 1, 1}, + {&__pyx_kp_start, __pyx_k_start, sizeof(__pyx_k_start), 1, 1, 1}, + {&__pyx_kp_add, __pyx_k_add, sizeof(__pyx_k_add), 1, 1, 1}, + {&__pyx_kp_get, __pyx_k_get, sizeof(__pyx_k_get), 1, 1, 1}, + {&__pyx_kp_append, __pyx_k_append, sizeof(__pyx_k_append), 1, 1, 1}, + {&__pyx_kp_pop, __pyx_k_pop, sizeof(__pyx_k_pop), 1, 1, 1}, + {&__pyx_kp_used_names, __pyx_k_used_names, sizeof(__pyx_k_used_names), 1, 1, 1}, + {&__pyx_kp_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 0}, + {&__pyx_kp_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 0}, + {&__pyx_kp_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 0}, + {&__pyx_kp_6, __pyx_k_6, sizeof(__pyx_k_6), 0, 0, 0}, + {&__pyx_kp_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 0}, + {0, 0, 0, 0, 0, 0} +}; +static int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_Exception = __Pyx_GetName(__pyx_b, __pyx_kp_Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + return 0; + __pyx_L1_error:; + return -1; +} + +static int __Pyx_InitGlobals(void) { + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + return 0; + __pyx_L1_error:; + return -1; +} + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC initparse(void); /*proto*/ +PyMODINIT_FUNC initparse(void) +#else +PyMODINIT_FUNC PyInit_parse(void); /*proto*/ +PyMODINIT_FUNC PyInit_parse(void) +#endif +{ + PyObject *__pyx_1 = 0; + PyObject *__pyx_2 = 0; + PyObject *__pyx_3 = 0; + PyObject *__pyx_4 = 0; + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) 
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + /*--- Libary function declarations ---*/ + __pyx_init_filenames(); + /*--- Initialize various global constants etc. ---*/ + if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + /*--- Module creation code ---*/ + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("parse", __pyx_methods, 0, 0, PYTHON_API_VERSION); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + #if PY_MAJOR_VERSION < 3 + Py_INCREF(__pyx_m); + #endif + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); + if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; + /*--- Builtin init code ---*/ + if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_skip_dispatch = 0; + /*--- Global init code ---*/ + /*--- Function export code ---*/ + /*--- Type init code ---*/ + __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser = &__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.classify = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_classify; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.shift = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_shift; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.push = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_push; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.pop = 
(void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_pop; + *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.convert = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_convert; + if (PyType_Ready(&__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (__Pyx_SetVtable(__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser.tp_dict, __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttrString(__pyx_m, "Parser", (PyObject *)&__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_ptype_6sphinx_6pycode_5pgen2_5parse_Parser = &__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser; + /*--- Type import code ---*/ + /*--- Function import code ---*/ + /*--- Execution code ---*/ + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":15 + * """ + * + * from sphinx.pycode.nodes import Node, Leaf # <<<<<<<<<<<<<< + * + * DEF NAME = 1 + */ + __pyx_1 = PyList_New(2); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_kp_Node); + PyList_SET_ITEM(__pyx_1, 0, __pyx_kp_Node); + Py_INCREF(__pyx_kp_Leaf); + PyList_SET_ITEM(__pyx_1, 1, __pyx_kp_Leaf); + __pyx_2 = __Pyx_Import(__pyx_kp_1, ((PyObject *)__pyx_1)); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_kp_Node); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(__pyx_m, __pyx_kp_Node, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + __pyx_1 = PyObject_GetAttr(__pyx_2, __pyx_kp_Leaf); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + if (PyObject_SetAttr(__pyx_m, __pyx_kp_Leaf, __pyx_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + Py_DECREF(__pyx_2); __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":19 + * DEF NAME = 1 + * + * class ParseError(Exception): # <<<<<<<<<<<<<< + * """Exception to signal the parser is stuck.""" + * + */ + __pyx_2 = PyDict_New(); if (unlikely(!__pyx_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_1 = PyTuple_New(1); if (unlikely(!__pyx_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_INCREF(__pyx_builtin_Exception); + PyTuple_SET_ITEM(__pyx_1, 0, __pyx_builtin_Exception); + if (PyDict_SetItemString(((PyObject *)__pyx_2), "__doc__", __pyx_kp_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_3 = __Pyx_CreateClass(((PyObject *)__pyx_1), ((PyObject *)__pyx_2), __pyx_kp_ParseError, "sphinx.pycode.pgen2.parse"); if (unlikely(!__pyx_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(((PyObject *)__pyx_1)); __pyx_1 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":22 + * """Exception to signal the parser is stuck.""" + * + * def __init__(self, msg, type, value, context): # <<<<<<<<<<<<<< + * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % + * (msg, type, value, context)) + */ + __pyx_1 = PyCFunction_New(&__pyx_mdef_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__, 0); if (unlikely(!__pyx_1)) {__pyx_filename = 
__pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + __pyx_4 = PyMethod_New(__pyx_1, 0, __pyx_3); if (unlikely(!__pyx_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_1); __pyx_1 = 0; + if (PyObject_SetAttr(__pyx_3, __pyx_kp___init__, __pyx_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_4); __pyx_4 = 0; + if (PyObject_SetAttr(__pyx_m, __pyx_kp_ParseError, __pyx_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;} + Py_DECREF(__pyx_3); __pyx_3 = 0; + Py_DECREF(((PyObject *)__pyx_2)); __pyx_2 = 0; + + /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":149 + * self.rootnode.used_names = self.used_names + * + * cdef convert(self, raw_node): # <<<<<<<<<<<<<< + * type, value, context, children = raw_node + * if children or type in self._grammar_number2symbol: + */ + #if PY_MAJOR_VERSION < 3 + return; + #else + return __pyx_m; + #endif + __pyx_L1_error:; + Py_XDECREF(__pyx_1); + Py_XDECREF(__pyx_2); + Py_XDECREF(__pyx_3); + Py_XDECREF(__pyx_4); + __Pyx_AddTraceback("sphinx.pycode.pgen2.parse"); + #if PY_MAJOR_VERSION >= 3 + return NULL; + #endif +} + +static const char *__pyx_filenames[] = { + "parse.pyx", +}; + +/* Runtime support code */ + +static void __pyx_init_filenames(void) { + __pyx_f = __pyx_filenames; +} + +static INLINE void __Pyx_RaiseArgtupleTooLong( + Py_ssize_t num_expected, + Py_ssize_t num_found) +{ + const char* error_message = + #if PY_VERSION_HEX < 0x02050000 + "function takes at most %d positional arguments (%d given)"; + #else + "function takes at most %zd positional arguments (%zd given)"; + #endif + PyErr_Format(PyExc_TypeError, error_message, num_expected, num_found); +} + +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) { + PyObject *__import__ = 0; + PyObject *empty_list = 0; + PyObject *module = 
0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + __import__ = PyObject_GetAttrString(__pyx_b, "__import__"); + if (!__import__) + goto bad; + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + module = PyObject_CallFunction(__import__, "OOOO", + name, global_dict, empty_dict, list); +bad: + Py_XDECREF(empty_list); + Py_XDECREF(__import__); + Py_XDECREF(empty_dict); + return module; +} + +static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) { + PyObject *result; + result = PyObject_GetAttr(dict, name); + if (!result) + PyErr_SetObject(PyExc_NameError, name); + return result; +} + +static PyObject *__Pyx_CreateClass( + PyObject *bases, PyObject *dict, PyObject *name, char *modname) +{ + PyObject *py_modname; + PyObject *result = 0; + + #if PY_MAJOR_VERSION < 3 + py_modname = PyString_FromString(modname); + #else + py_modname = PyUnicode_FromString(modname); + #endif + if (!py_modname) + goto bad; + if (PyDict_SetItemString(dict, "__module__", py_modname) < 0) + goto bad; + #if PY_MAJOR_VERSION < 3 + result = PyClass_New(bases, dict, name); + #else + result = PyObject_CallFunctionObjArgs((PyObject *)&PyType_Type, name, bases, dict, NULL); + #endif +bad: + Py_XDECREF(py_modname); + return result; +} + + +static PyObject *__Pyx_UnpackItem(PyObject *iter, Py_ssize_t index) { + PyObject *item; + if (!(item = PyIter_Next(iter))) { + if (!PyErr_Occurred()) { + PyErr_Format(PyExc_ValueError, + #if PY_VERSION_HEX < 0x02050000 + "need more than %d values to unpack", (int)index); + #else + "need more than %zd values to unpack", index); + #endif + } + } + return item; +} + +static int __Pyx_EndUnpack(PyObject *iter) { + PyObject *item; + if ((item = PyIter_Next(iter))) { + Py_DECREF(item); + PyErr_SetString(PyExc_ValueError, 
"too many values to unpack"); + return -1; + } + else if (!PyErr_Occurred()) + return 0; + else + return -1; +} + +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) { + Py_XINCREF(type); + Py_XINCREF(value); + Py_XINCREF(tb); + /* First, check the traceback argument, replacing None with NULL. */ + if (tb == Py_None) { + Py_DECREF(tb); + tb = 0; + } + else if (tb != NULL && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + /* Next, replace a missing value with None */ + if (value == NULL) { + value = Py_None; + Py_INCREF(value); + } + #if PY_VERSION_HEX < 0x02050000 + if (!PyClass_Check(type)) + #else + if (!PyType_Check(type)) + #endif + { + /* Raising an instance. The value should be a dummy. */ + if (value != Py_None) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + /* Normalize to raise , */ + Py_DECREF(value); + value = type; + #if PY_VERSION_HEX < 0x02050000 + if (PyInstance_Check(type)) { + type = (PyObject*) ((PyInstanceObject*)type)->in_class; + Py_INCREF(type); + } + else { + type = 0; + PyErr_SetString(PyExc_TypeError, + "raise: exception must be an old-style class or instance"); + goto raise_error; + } + #else + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + #endif + } + PyErr_Restore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} + + +static void __Pyx_WriteUnraisable(const char *name) { + PyObject *old_exc, *old_val, *old_tb; + PyObject *ctx; + PyErr_Fetch(&old_exc, &old_val, &old_tb); + #if PY_MAJOR_VERSION < 3 + ctx = PyString_FromString(name); + #else + ctx = PyUnicode_FromString(name); + #endif + 
PyErr_Restore(old_exc, old_val, old_tb); + if (!ctx) + ctx = Py_None; + PyErr_WriteUnraisable(ctx); +} + +static int __Pyx_SetVtable(PyObject *dict, void *vtable) { + PyObject *pycobj = 0; + int result; + + pycobj = PyCObject_FromVoidPtr(vtable, 0); + if (!pycobj) + goto bad; + if (PyDict_SetItemString(dict, "__pyx_vtable__", pycobj) < 0) + goto bad; + result = 0; + goto done; + +bad: + result = -1; +done: + Py_XDECREF(pycobj); + return result; +} + +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" + +static void __Pyx_AddTraceback(const char *funcname) { + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + PyObject *py_globals = 0; + PyObject *empty_string = 0; + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(__pyx_filename); + #else + py_srcfile = PyUnicode_FromString(__pyx_filename); + #endif + if (!py_srcfile) goto bad; + if (__pyx_clineno) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_globals = PyModule_GetDict(__pyx_m); + if (!py_globals) goto bad; + #if PY_MAJOR_VERSION < 3 + empty_string = PyString_FromStringAndSize("", 0); + #else + empty_string = PyBytes_FromStringAndSize("", 0); + #endif + if (!empty_string) goto bad; + py_code = PyCode_New( + 0, /*int argcount,*/ + #if PY_MAJOR_VERSION >= 3 + 0, /*int kwonlyargcount,*/ + #endif + 0, /*int nlocals,*/ + 0, /*int stacksize,*/ + 0, /*int flags,*/ + empty_string, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, 
/*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + __pyx_lineno, /*int firstlineno,*/ + empty_string /*PyObject *lnotab*/ + ); + if (!py_code) goto bad; + py_frame = PyFrame_New( + PyThreadState_Get(), /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + py_globals, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + py_frame->f_lineno = __pyx_lineno; + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + Py_XDECREF(empty_string); + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode && (!t->is_identifier)) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else /* Python 3+ has unicode identifiers */ + if (t->is_identifier || (t->is_unicode && t->intern)) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->is_unicode) { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + ++t; + } + return 0; +} + +/* Type Conversion Functions */ + +static INLINE Py_ssize_t __pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject* x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} + +static INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + if (x == Py_True) return 1; + else if (x == Py_False) return 0; + else return PyObject_IsTrue(x); +} + +static INLINE PY_LONG_LONG __pyx_PyInt_AsLongLong(PyObject* x) { + if (PyInt_CheckExact(x)) { + return PyInt_AS_LONG(x); + } + else if (PyLong_CheckExact(x)) { + return PyLong_AsLongLong(x); + } + else { + PY_LONG_LONG val; + 
PyObject* tmp = PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; + val = __pyx_PyInt_AsLongLong(tmp); + Py_DECREF(tmp); + return val; + } +} + +static INLINE unsigned PY_LONG_LONG __pyx_PyInt_AsUnsignedLongLong(PyObject* x) { + if (PyInt_CheckExact(x)) { + long val = PyInt_AS_LONG(x); + if (unlikely(val < 0)) { + PyErr_SetString(PyExc_TypeError, "Negative assignment to unsigned type."); + return (unsigned PY_LONG_LONG)-1; + } + return val; + } + else if (PyLong_CheckExact(x)) { + return PyLong_AsUnsignedLongLong(x); + } + else { + PY_LONG_LONG val; + PyObject* tmp = PyNumber_Int(x); if (!tmp) return (PY_LONG_LONG)-1; + val = __pyx_PyInt_AsUnsignedLongLong(tmp); + Py_DECREF(tmp); + return val; + } +} + + +static INLINE unsigned char __pyx_PyInt_unsigned_char(PyObject* x) { + if (sizeof(unsigned char) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + unsigned char val = (unsigned char)long_val; + if (unlikely((val != long_val) || (long_val < 0))) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned char"); + return (unsigned char)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE unsigned short __pyx_PyInt_unsigned_short(PyObject* x) { + if (sizeof(unsigned short) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + unsigned short val = (unsigned short)long_val; + if (unlikely((val != long_val) || (long_val < 0))) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to unsigned short"); + return (unsigned short)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE char __pyx_PyInt_char(PyObject* x) { + if (sizeof(char) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + char val = (char)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); + return (char)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE 
short __pyx_PyInt_short(PyObject* x) { + if (sizeof(short) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + short val = (short)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to short"); + return (short)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE int __pyx_PyInt_int(PyObject* x) { + if (sizeof(int) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + int val = (int)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); + return (int)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE long __pyx_PyInt_long(PyObject* x) { + if (sizeof(long) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + long val = (long)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); + return (long)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE signed char __pyx_PyInt_signed_char(PyObject* x) { + if (sizeof(signed char) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + signed char val = (signed char)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to signed char"); + return (signed char)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE signed short __pyx_PyInt_signed_short(PyObject* x) { + if (sizeof(signed short) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + signed short val = (signed short)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to signed short"); + return (signed short)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE signed int __pyx_PyInt_signed_int(PyObject* x) { + if 
(sizeof(signed int) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + signed int val = (signed int)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to signed int"); + return (signed int)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE signed long __pyx_PyInt_signed_long(PyObject* x) { + if (sizeof(signed long) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + signed long val = (signed long)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to signed long"); + return (signed long)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + +static INLINE long double __pyx_PyInt_long_double(PyObject* x) { + if (sizeof(long double) < sizeof(long)) { + long long_val = __pyx_PyInt_AsLong(x); + long double val = (long double)long_val; + if (unlikely((val != long_val) )) { + PyErr_SetString(PyExc_OverflowError, "value too large to convert to long double"); + return (long double)-1; + } + return val; + } + else { + return __pyx_PyInt_AsLong(x); + } +} + diff --git a/sphinx/pycode/pgen2/parse.pyx b/sphinx/pycode/pgen2/parse.pyx index 6a11ee6b5..537d73936 100644 --- a/sphinx/pycode/pgen2/parse.pyx +++ b/sphinx/pycode/pgen2/parse.pyx @@ -1,6 +1,8 @@ # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. +# Adapted from parse.py to be compiled with Cython by Georg Brandl. + """Parser engine for the grammar tables generated by pgen. The grammar table must be loaded first. @@ -10,7 +12,7 @@ how this parsing engine works. """ -from sphinx.pycode.pytree import Node, Leaf +from sphinx.pycode.nodes import Node, Leaf DEF NAME = 1 From cac6f969484b40f4398d11f1f81efb9ce9808d62 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Thu, 1 Jan 2009 23:49:32 +0100 Subject: [PATCH 18/29] Fix long line. 
--- sphinx/pycode/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index d707db2d6..03481233f 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -233,8 +233,8 @@ class ModuleAnalyzer(object): indent += 1 elif type == token.DEDENT: indent -= 1 - # if the stacklevel is the same as it was before the last def/class block, - # this dedent closes that block + # if the stacklevel is the same as it was before the last + # def/class block, this dedent closes that block if stack and indent == stack[-1][3]: dtype, fullname, startline, _ = stack.pop() endline = spos[0] From 8161b024ed1754c127cda4fb4453225cc04877a4 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 2 Jan 2009 00:25:29 +0100 Subject: [PATCH 19/29] Add lines and start-after/end-before options to literalinclude. --- sphinx/directives/code.py | 63 ++++++++++++++++++++++++++++----------- sphinx/util/__init__.py | 25 +++++++++++++++- 2 files changed, 69 insertions(+), 19 deletions(-) diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index cc74566d1..07fceaae8 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -3,7 +3,7 @@ sphinx.directives.code ~~~~~~~~~~~~~~~~~~~~~~ - :copyright: 2007-2008 by Georg Brandl. + :copyright: 2007-2009 by Georg Brandl. :license: BSD, see LICENSE for details. 
""" @@ -15,6 +15,7 @@ from docutils import nodes from docutils.parsers.rst import directives from sphinx import addnodes +from sphinx.util import parselinenos # ------ highlight directive -------------------------------------------------------- @@ -68,19 +69,9 @@ def literalinclude_directive(name, arguments, options, content, lineno, lineno - state_machine.input_offset - 1))) fn = path.normpath(path.join(source_dir, rel_fn)) - fromline = toline = None - objectname = options.get('pyobject') - if objectname is not None: - from sphinx.pycode import ModuleAnalyzer - analyzer = ModuleAnalyzer.for_file(fn, '') - tags = analyzer.find_tags() - if objectname not in tags: - return [state.document.reporter.warning( - 'Object named %r not found in include file %r' % - (objectname, arguments[0]), line=lineno)] - else: - fromline = tags[objectname][1] - 1 - toline = tags[objectname][2] - 1 + if 'pyobject' in options and 'lines' in options: + return [state.document.reporter.warning( + 'Cannot use both "pyobject" and "lines" options', line=lineno)] encoding = options.get('encoding', env.config.source_encoding) try: @@ -96,7 +87,43 @@ def literalinclude_directive(name, arguments, options, content, lineno, 'Encoding %r used for reading included file %r seems to ' 'be wrong, try giving an :encoding: option' % (encoding, arguments[0]))] - text = ''.join(lines[fromline:toline]) + + objectname = options.get('pyobject') + if objectname is not None: + from sphinx.pycode import ModuleAnalyzer + analyzer = ModuleAnalyzer.for_file(fn, '') + tags = analyzer.find_tags() + if objectname not in tags: + return [state.document.reporter.warning( + 'Object named %r not found in include file %r' % + (objectname, arguments[0]), line=lineno)] + else: + lines = lines[tags[objectname][1] - 1 : tags[objectname][2]] + + linespec = options.get('lines') + if linespec is not None: + try: + linelist = parselinenos(linespec, len(lines)) + except ValueError, err: + return 
[state.document.reporter.warning(str(err), line=lineno)] + lines = [lines[i] for i in linelist] + + startafter = options.get('start-after') + endbefore = options.get('end-before') + if startafter is not None or endbefore is not None: + use = not startafter + res = [] + for line in lines: + if not use and startafter in line: + use = True + elif use and endbefore in line: + use = False + break + elif use: + res.append(line) + lines = res + + text = ''.join(lines) retnode = nodes.literal_block(text, text, source=fn) retnode.line = 1 if options.get('language', ''): @@ -110,9 +137,9 @@ literalinclude_directive.options = {'linenos': directives.flag, 'language': directives.unchanged_required, 'encoding': directives.encoding, 'pyobject': directives.unchanged_required, - #'lines': directives.unchanged_required, - #'start-after': directives.unchanged_required, - #'end-before': directives.unchanged_required, + 'lines': directives.unchanged_required, + 'start-after': directives.unchanged_required, + 'end-before': directives.unchanged_required, } literalinclude_directive.content = 0 literalinclude_directive.arguments = (1, 0, 0) diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index e25bc5a1b..fbd7d2436 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -5,7 +5,7 @@ Utility functions for Sphinx. - :copyright: 2007-2008 by Georg Brandl. + :copyright: 2007-2009 by Georg Brandl. :license: BSD, see LICENSE for details. """ @@ -324,3 +324,26 @@ class FilenameUniqDict(dict): def __setstate__(self, state): self._existing = state + + +def parselinenos(spec, total): + """ + Parse a line number spec (such as "1,2,4-6") and return a list of + wanted line numbers. 
+ """ + items = list() + parts = spec.split(',') + for part in parts: + try: + begend = part.strip().split('-') + if len(begend) > 2: + raise ValueError + if len(begend) == 1: + items.append(int(begend[0])-1) + else: + start = (begend[0] == '') and 0 or int(begend[0])-1 + end = (begend[1] == '') and total or int(begend[1]) + items.extend(xrange(start, end)) + except Exception, err: + raise ValueError('invalid line number spec: %r' % spec) + return items From 9fc2437773e47986778dba58753c203dc02df9dd Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Fri, 2 Jan 2009 00:32:02 +0100 Subject: [PATCH 20/29] Document new literalinclude options. --- doc/markup/code.rst | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/doc/markup/code.rst b/doc/markup/code.rst index 2fd51c84b..0bf8343bb 100644 --- a/doc/markup/code.rst +++ b/doc/markup/code.rst @@ -123,10 +123,25 @@ Includes This would only include the code lines belonging to the ``start()`` method in the ``Timer`` class within the file. + Alternatively, you can specify exactly which lines to include by giving a + ``lines`` option:: + + .. literalinclude:: example.py + :lines: 1,3,5-10,20- + + This includes the lines 1, 3, 5 to 10 and lines 20 to the last line. + + Another way to control which part of the file is included is to use the + ``start-after`` and ``end-before`` options (or only one of them). If + ``start-after`` is given as a string option, only lines that follow the first + line containing that string are included. If ``end-before`` is given as a + string option, only lines that precede the first line containing that string + are included. + .. versionadded:: 0.4.3 The ``encoding`` option. .. versionadded:: 0.6 - The ``pyobject`` option. + The ``pyobject``, ``lines``, ``start-after`` and ``end-before`` options. ..
rubric:: Footnotes From 4d083fabbe5c66c622c2725efcac53cb4044cf9d Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sat, 3 Jan 2009 12:29:42 +0100 Subject: [PATCH 21/29] Add tests for new literalinclude options, and fix an off-by-one bug. --- sphinx/directives/code.py | 2 +- tests/root/includes.txt | 22 ++++++++++++++++++++++ tests/root/literal.inc | 9 +++++++++ tests/test_build.py | 36 +++++++++++++++++++++++++----------- tests/test_markup.py | 2 ++ 5 files changed, 59 insertions(+), 12 deletions(-) diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py index 07fceaae8..808366894 100644 --- a/sphinx/directives/code.py +++ b/sphinx/directives/code.py @@ -98,7 +98,7 @@ def literalinclude_directive(name, arguments, options, content, lineno, 'Object named %r not found in include file %r' % (objectname, arguments[0]), line=lineno)] else: - lines = lines[tags[objectname][1] - 1 : tags[objectname][2]] + lines = lines[tags[objectname][1] - 1 : tags[objectname][2] - 1] linespec = options.get('lines') if linespec is not None: diff --git a/tests/root/includes.txt b/tests/root/includes.txt index d2964d3f0..44e33af0d 100644 --- a/tests/root/includes.txt +++ b/tests/root/includes.txt @@ -15,6 +15,28 @@ Test file and literal inclusion .. include:: wrongenc.inc :encoding: latin-1 +Literalinclude options +====================== + +.. highlight:: text + +.. cssclass:: inc-pyobj1 +.. literalinclude:: literal.inc + :pyobject: Foo + +.. cssclass:: inc-pyobj2 +.. literalinclude:: literal.inc + :pyobject: Bar.baz + +.. cssclass:: inc-lines +.. literalinclude:: literal.inc + :lines: 6-7,9 + +.. cssclass:: inc-startend +.. 
literalinclude:: literal.inc + :start-after: coding: utf-8 + :end-before: class Foo + Testing downloadable files ========================== diff --git a/tests/root/literal.inc b/tests/root/literal.inc index a4ce93d24..d5b9890c9 100644 --- a/tests/root/literal.inc +++ b/tests/root/literal.inc @@ -2,3 +2,12 @@ # -*- coding: utf-8 -*- foo = u"Including Unicode characters: üöä" + +class Foo: + pass + +class Bar: + def baz(): + pass + +def bar(): pass diff --git a/tests/test_build.py b/tests/test_build.py index 91506dad3..0ac5380e7 100644 --- a/tests/test_build.py +++ b/tests/test_build.py @@ -10,6 +10,7 @@ """ import os +import re import sys import difflib import htmlentitydefs @@ -32,7 +33,7 @@ WARNING: %(root)s/images.txt:9: Image file not readable: foo.png WARNING: %(root)s/images.txt:23: Nonlocal image URI found: http://www.python.org/logo.png WARNING: %(root)s/includes.txt:: (WARNING/2) Encoding 'utf-8' used for reading included \ file u'wrongenc.inc' seems to be wrong, try giving an :encoding: option -WARNING: %(root)s/includes.txt:34: Download file not readable: nonexisting.png +WARNING: %(root)s/includes.txt:56: Download file not readable: nonexisting.png """ HTML_WARNINGS = ENV_WARNINGS + """\ @@ -61,11 +62,19 @@ HTML_XPATH = { ".//pre": u'Max Strauß', ".//a[@href='_downloads/img.png']": '', ".//a[@href='_downloads/img1.png']": '', + ".//div[@class='inc-pyobj1 highlight-text']/div/pre": + r'^class Foo:\n pass\n\s*$', + ".//div[@class='inc-pyobj2 highlight-text']/div/pre": + r'^ def baz\(\):\n pass\n\s*$', + ".//div[@class='inc-lines highlight-text']/div/pre": + r'^class Foo:\n pass\nclass Bar:\n$', + ".//div[@class='inc-startend highlight-text']/div/pre": + ur'^foo = u"Including Unicode characters: üöä"\n$', }, 'autodoc.html': { ".//dt[@id='test_autodoc.Class']": '', - ".//dt[@id='test_autodoc.function']/em": '**kwds', - ".//dd": 'Return spam.', + ".//dt[@id='test_autodoc.function']/em": r'\*\*kwds', + ".//dd": r'Return spam\.', }, 'markup.html': { 
".//meta[@name='author'][@content='Me']": '', @@ -81,7 +90,7 @@ HTML_XPATH = { }, 'contents.html': { ".//meta[@name='hc'][@content='hcval']": '', - ".//td[@class='label']": '[Ref1]', + ".//td[@class='label']": r'\[Ref1\]', ".//li[@class='toctree-l1']/a": 'Testing various markup', ".//li[@class='toctree-l2']/a": 'Admonitions', ".//title": 'Sphinx ', @@ -117,18 +126,23 @@ def test_html(app): parser = NslessParser() parser.entity.update(htmlentitydefs.entitydefs) etree = ET.parse(os.path.join(app.outdir, fname), parser) - for path, text in paths.iteritems(): + for path, check in paths.iteritems(): nodes = list(etree.findall(path)) assert nodes != [] - if not text: + if callable(check): + check(nodes) + elif not check: # only check for node presence continue - for node in nodes: - if node.text and text in node.text: - break else: - assert False, ('%r not found in any node matching ' - 'path %s in %s' % (text, path, fname)) + rex = re.compile(check) + for node in nodes: + if node.text and rex.search(node.text): + break + else: + assert False, ('%r not found in any node matching ' + 'path %s in %s: %r' % (check, path, fname, + [node.text for node in nodes])) @with_app(buildername='latex', warning=latex_warnfile) diff --git a/tests/test_markup.py b/tests/test_markup.py index 86889a2ac..295838664 100644 --- a/tests/test_markup.py +++ b/tests/test_markup.py @@ -17,11 +17,13 @@ from docutils import frontend, utils, nodes from docutils.parsers import rst from sphinx import addnodes +from sphinx.util import texescape from sphinx.writers.html import HTMLWriter, SmartyPantsHTMLTranslator from sphinx.writers.latex import LaTeXWriter, LaTeXTranslator def setup_module(): global app, settings, parser + texescape.init() # otherwise done by the latex builder app = TestApp(cleanenv=True) optparser = frontend.OptionParser(components=(rst.Parser, HTMLWriter, LaTeXWriter)) settings = optparser.get_default_values() From 0c4a2bbe8d09fdaf616ee63d15a2266b35a7296d Mon Sep 17 00:00:00 2001 
From: Georg Brandl Date: Sat, 3 Jan 2009 21:18:28 +0100 Subject: [PATCH 22/29] Fix bad node value for makevar. --- sphinx/roles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sphinx/roles.py b/sphinx/roles.py index 2bbdab94f..d2e9558e9 100644 --- a/sphinx/roles.py +++ b/sphinx/roles.py @@ -24,7 +24,7 @@ generic_docroles = { 'guilabel' : nodes.strong, 'kbd' : nodes.literal, 'mailheader' : addnodes.literal_emphasis, - 'makevar' : nodes.Text, + 'makevar' : nodes.strong, 'manpage' : addnodes.literal_emphasis, 'mimetype' : addnodes.literal_emphasis, 'newsgroup' : addnodes.literal_emphasis, From 5df8e162f9b2740ac2b74fa15bc3c0be8b56a02b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 14:39:36 +0100 Subject: [PATCH 23/29] Fix a few remaining copyrights and add 2009 to license. --- LICENSE | 2 +- doc/conf.py | 2 +- sphinx/builders/__init__.py | 2 +- sphinx/environment.py | 4 ---- sphinx/jinja2glue.py | 2 +- sphinx/pycode/__init__.py | 2 +- sphinx/pycode/nodes.py | 2 +- sphinx/util/docstrings.py | 2 +- sphinx/util/jsdump.py | 2 +- 9 files changed, 8 insertions(+), 12 deletions(-) diff --git a/LICENSE b/LICENSE index 8b1ad263c..faac79f0b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2007-2008 by the Sphinx team (see AUTHORS file). +Copyright (c) 2007-2009 by the Sphinx team (see AUTHORS file). All rights reserved. License for Sphinx diff --git a/doc/conf.py b/doc/conf.py index 89247821b..709d6f750 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -36,7 +36,7 @@ master_doc = 'contents' # General substitutions. project = 'Sphinx' -copyright = '2008, Georg Brandl' +copyright = '2007-2009, Georg Brandl' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. 
diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py index 52b4b0ac2..8847b6dca 100644 --- a/sphinx/builders/__init__.py +++ b/sphinx/builders/__init__.py @@ -5,7 +5,7 @@ Builder superclass for all builders. - :copyright: 2007-2008 by Georg Brandl, Sebastian Wiesner, Horst Gutmann. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/environment.py b/sphinx/environment.py index 0562ce66b..b70bd250e 100644 --- a/sphinx/environment.py +++ b/sphinx/environment.py @@ -5,11 +5,7 @@ Global creation environment. -<<<<<<< local - :copyright: 2007-2009 by Georg Brandl. -======= :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. ->>>>>>> other :license: BSD, see LICENSE for details. """ diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py index 996df70bb..0c7c5d721 100644 --- a/sphinx/jinja2glue.py +++ b/sphinx/jinja2glue.py @@ -5,7 +5,7 @@ Glue code for the jinja2 templating engine. - :copyright: 2008 by Sebastian Wiesner. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 03481233f..a61a051a5 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -5,7 +5,7 @@ Utilities parsing and analyzing Python code. - :copyright: 2008-2009 by Georg Brandl. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py index d0fb522bb..4d27fc660 100644 --- a/sphinx/pycode/nodes.py +++ b/sphinx/pycode/nodes.py @@ -5,7 +5,7 @@ Parse tree node implementations. - :copyright: 2009 by Georg Brandl. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py index d7e20e4cc..1b0a599a6 100644 --- a/sphinx/util/docstrings.py +++ b/sphinx/util/docstrings.py @@ -5,7 +5,7 @@ Utilities for docstring processing. - :copyright: 2008 by Georg Brandl. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ diff --git a/sphinx/util/jsdump.py b/sphinx/util/jsdump.py index 2eb4619e5..8c760b68c 100644 --- a/sphinx/util/jsdump.py +++ b/sphinx/util/jsdump.py @@ -6,7 +6,7 @@ This module implements a simple JavaScript serializer. Uses the basestring encode function from simplejson by Bob Ippolito. - :copyright: Copyright 2008 by the Sphinx team, see AUTHORS. + :copyright: Copyright 2007-2009 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ From 8cf33a70439d91b85d500908bde1bb7f6e9958af Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 19:16:52 +0100 Subject: [PATCH 24/29] Cache tags and attribute docs in the analyzer. 
--- sphinx/pycode/__init__.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index a61a051a5..0141ede40 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -176,6 +176,10 @@ class ModuleAnalyzer(object): self.tokens = None # will be filled by parse() self.parsetree = None + # will be filled by find_attr_docs() + self.attr_docs = None + # will be filled by find_tags() + self.tags = None def tokenize(self): """Generate tokens from the source.""" @@ -193,13 +197,18 @@ class ModuleAnalyzer(object): def find_attr_docs(self, scope=''): """Find class and module-level attributes and their documentation.""" + if self.attr_docs is not None: + return self.attr_docs self.parse() attr_visitor = AttrDocVisitor(number2name, scope) attr_visitor.visit(self.parsetree) + self.attr_docs = attr_visitor.collected return attr_visitor.collected def find_tags(self): """Find class, function and method definitions and their location.""" + if self.tags is not None: + return self.tags self.tokenize() result = {} namespace = [] @@ -246,6 +255,7 @@ class ModuleAnalyzer(object): if defline: defline = False expect_indent = True + self.tags = result return result From 2e9866821eecbde6dc49dd42e407ecf8b41ce1f1 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 19:35:03 +0100 Subject: [PATCH 25/29] Support all types of string literals in literals.py. --- sphinx/pycode/pgen2/literals.py | 54 +++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 10 deletions(-) diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py index 0b3948a54..78667df01 100644 --- a/sphinx/pycode/pgen2/literals.py +++ b/sphinx/pycode/pgen2/literals.py @@ -1,6 +1,8 @@ # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. +# Extended to handle raw and unicode literals by Georg Brandl. 
+ """Safely evaluate Python string literals without using eval().""" import re @@ -16,28 +18,60 @@ simple_escapes = {"a": "\a", '"': '"', "\\": "\\"} +def convert_hex(x, n): + if len(x) < n+1: + raise ValueError("invalid hex string escape ('\\%s')" % x) + try: + return int(x[1:], 16) + except ValueError: + raise ValueError("invalid hex string escape ('\\%s')" % x) + def escape(m): all, tail = m.group(0, 1) assert all.startswith("\\") esc = simple_escapes.get(tail) if esc is not None: return esc - if tail.startswith("x"): - hexes = tail[1:] - if len(hexes) < 2: - raise ValueError("invalid hex string escape ('\\%s')" % tail) + elif tail.startswith("x"): + return chr(convert_hex(tail, 2)) + elif tail.startswith('u'): + return unichr(convert_hex(tail, 4)) + elif tail.startswith('U'): + return unichr(convert_hex(tail, 8)) + elif tail.startswith('N'): + import unicodedata try: - i = int(hexes, 16) - except ValueError: - raise ValueError("invalid hex string escape ('\\%s')" % tail) + return unicodedata.lookup(tail[1:-1]) + except KeyError: + raise ValueError("undefined character name %r" % tail[1:-1]) else: try: - i = int(tail, 8) + return chr(int(tail, 8)) except ValueError: raise ValueError("invalid octal string escape ('\\%s')" % tail) - return chr(i) + +def escaperaw(m): + all, tail = m.group(0, 1) + if tail.startswith('u'): + return unichr(convert_hex(tail, 4)) + elif tail.startswith('U'): + return unichr(convert_hex(tail, 8)) + else: + return all + +escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})") +uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|" + r"u[0-9a-fA-F]{0,4}|U[0-9a-fA-F]{0,8}|N\{.+?\})") def evalString(s): + regex = escape_re + repl = escape + if s.startswith('u') or s.startswith('U'): + regex = uni_escape_re + s = s[1:] + if s.startswith('r') or s.startswith('R'): + repl = escaperaw + s = s[1:] assert s.startswith("'") or s.startswith('"'), repr(s[:1]) q = s[0] if s[:3] == q*3: @@ -45,7 +79,7 @@ def 
evalString(s): assert s.endswith(q), repr(s[-len(q):]) assert len(s) >= 2*len(q) s = s[len(q):-len(q)] - return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) + return regex.sub(repl, s) def test(): for i in range(256): From cae384f5ff2bd3da925362eca0b89fa2f985029b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 20:02:24 +0100 Subject: [PATCH 26/29] Add support for decoding strings and comments to the analyzer. --- sphinx/pycode/__init__.py | 86 +++++++++++++++++++++------------ sphinx/pycode/pgen2/literals.py | 4 +- 2 files changed, 59 insertions(+), 31 deletions(-) diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py index 0141ede40..17dc6afbc 100644 --- a/sphinx/pycode/__init__.py +++ b/sphinx/pycode/__init__.py @@ -9,6 +9,7 @@ :license: BSD, see LICENSE for details. """ +import re import sys from os import path from cStringIO import StringIO @@ -35,6 +36,9 @@ number2name = pygrammar.number2symbol.copy() number2name.update(token.tok_name) +# a regex to recognize coding cookies +_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)') + _eq = nodes.Leaf(token.EQUAL, '=') @@ -46,8 +50,9 @@ class AttrDocVisitor(nodes.NodeVisitor): The docstrings can either be in special '#:' comments before the assignment or in a docstring after it. 
""" - def init(self, scope): + def init(self, scope, encoding): self.scope = scope + self.encoding = encoding self.namespace = [] self.collected = {} @@ -71,6 +76,7 @@ class AttrDocVisitor(nodes.NodeVisitor): if not pnode or pnode.type not in (token.INDENT, token.DEDENT): break prefix = pnode.get_prefix() + prefix = prefix.decode(self.encoding) docstring = prepare_commentdoc(prefix) if docstring: self.add_docstring(node, docstring) @@ -86,7 +92,8 @@ class AttrDocVisitor(nodes.NodeVisitor): if prev.type == sym.simple_stmt and \ prev[0].type == sym.expr_stmt and _eq in prev[0].children: # need to "eval" the string because it's returned in its original form - docstring = prepare_docstring(literals.evalString(node[0].value)) + docstring = literals.evalString(node[0].value, self.encoding) + docstring = prepare_docstring(docstring) self.add_docstring(prev[0], docstring) def visit_funcdef(self, node): @@ -136,38 +143,48 @@ class ModuleAnalyzer(object): @classmethod def for_module(cls, modname): if ('module', modname) in cls.cache: - return cls.cache['module', modname] - if modname not in sys.modules: - try: - __import__(modname) - except ImportError, err: - raise PycodeError('error importing %r' % modname, err) - mod = sys.modules[modname] - if hasattr(mod, '__loader__'): - try: - source = mod.__loader__.get_source(modname) - except Exception, err: - raise PycodeError('error getting source for %r' % modname, err) - obj = cls.for_string(source, modname) - cls.cache['module', modname] = obj - return obj - filename = getattr(mod, '__file__', None) - if filename is None: - raise PycodeError('no source found for module %r' % modname) - filename = path.normpath(filename) - lfilename = filename.lower() - if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'): - filename = filename[:-1] - elif not lfilename.endswith('.py'): - raise PycodeError('source is not a .py file: %r' % filename) - if not path.isfile(filename): - raise PycodeError('source file is not present: %r' % 
filename) - obj = cls.for_file(filename, modname) + entry = cls.cache['module', modname] + if isinstance(entry, PycodeError): + raise entry + return entry + + try: + if modname not in sys.modules: + try: + __import__(modname) + except ImportError, err: + raise PycodeError('error importing %r' % modname, err) + mod = sys.modules[modname] + if hasattr(mod, '__loader__'): + try: + source = mod.__loader__.get_source(modname) + except Exception, err: + raise PycodeError('error getting source for %r' % modname, err) + obj = cls.for_string(source, modname) + cls.cache['module', modname] = obj + return obj + filename = getattr(mod, '__file__', None) + if filename is None: + raise PycodeError('no source found for module %r' % modname) + filename = path.normpath(filename) + lfilename = filename.lower() + if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'): + filename = filename[:-1] + elif not lfilename.endswith('.py'): + raise PycodeError('source is not a .py file: %r' % filename) + if not path.isfile(filename): + raise PycodeError('source file is not present: %r' % filename) + obj = cls.for_file(filename, modname) + except PycodeError, err: + cls.cache['module', modname] = err + raise cls.cache['module', modname] = obj return obj def __init__(self, source, modname, srcname): + # name of the module self.modname = modname + # name of the source file self.srcname = srcname # file-like object yielding source lines self.source = source @@ -194,13 +211,22 @@ class ModuleAnalyzer(object): return self.tokenize() self.parsetree = pydriver.parse_tokens(self.tokens) + # find the source code encoding + encoding = sys.getdefaultencoding() + comments = self.parsetree.get_prefix() + for line in comments.splitlines()[:2]: + match = _coding_re.search(line) + if match is not None: + encoding = match.group(1) + break + self.encoding = encoding def find_attr_docs(self, scope=''): """Find class and module-level attributes and their documentation.""" if self.attr_docs is not None: 
return self.attr_docs self.parse() - attr_visitor = AttrDocVisitor(number2name, scope) + attr_visitor = AttrDocVisitor(number2name, scope, self.encoding) attr_visitor.visit(self.parsetree) self.attr_docs = attr_visitor.collected return attr_visitor.collected diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py index 78667df01..319002910 100644 --- a/sphinx/pycode/pgen2/literals.py +++ b/sphinx/pycode/pgen2/literals.py @@ -63,9 +63,11 @@ escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})") uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|" r"u[0-9a-fA-F]{0,4}|U[0-9a-fA-F]{0,8}|N\{.+?\})") -def evalString(s): +def evalString(s, encoding=None): regex = escape_re repl = escape + if encoding: + s = s.decode(encoding) if s.startswith('u') or s.startswith('U'): regex = uni_escape_re s = s[1:] From 27bc4e31894a004f43b19ce10c24ff845fc6ec54 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 20:27:37 +0100 Subject: [PATCH 27/29] Add new tests for the attribute feature and fix existing tests. 
--- sphinx/ext/autodoc.py | 108 ++++++++++++++++------------------------ sphinx/util/__init__.py | 14 ++++++ tests/test_autodoc.py | 65 +++++++++++++++++------- 3 files changed, 102 insertions(+), 85 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index c8faa0dad..60ebd5b5e 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -21,7 +21,7 @@ from docutils import nodes from docutils.parsers.rst import directives from docutils.statemachine import ViewList -from sphinx.util import rpartition, nested_parse_with_titles +from sphinx.util import rpartition, nested_parse_with_titles, force_decode from sphinx.pycode import ModuleAnalyzer, PycodeError from sphinx.util.docstrings import prepare_docstring @@ -31,8 +31,6 @@ try: except NameError: base_exception = Exception -_charset_re = re.compile(r'coding[:=]\s*([-\w.]+)') -_module_charsets = {} py_ext_sig_re = re.compile( r'''^ ([\w.]+::)? # explicit module name @@ -173,27 +171,6 @@ def isdescriptor(x): return False -def get_module_charset(module): - """Return the charset of the given module (cached in _module_charsets).""" - if module in _module_charsets: - return _module_charsets[module] - try: - filename = __import__(module, None, None, ['foo']).__file__ - except (ImportError, AttributeError): - return None - if filename[-4:].lower() in ('.pyc', '.pyo'): - filename = filename[:-1] - for line in [linecache.getline(filename, x) for x in (1, 2)]: - match = _charset_re.search(line) - if match is not None: - charset = match.group(1) - break - else: - charset = 'ascii' - _module_charsets[module] = charset - return charset - - class RstGenerator(object): def __init__(self, options, document, lineno): self.options = options @@ -207,15 +184,19 @@ class RstGenerator(object): def warn(self, msg): self.warnings.append(self.reporter.warning(msg, line=self.lineno)) - def get_doc(self, what, name, obj): - """Format and yield lines of the docstring(s) for the object.""" + def get_doc(self, what, obj, 
encoding=None): + """Decode and return lines of the docstring(s) for the object.""" docstrings = [] + + # add the regular docstring if present if getattr(obj, '__doc__', None): docstrings.append(obj.__doc__) - # skip some lines in module docstrings if configured + + # skip some lines in module docstrings if configured (deprecated!) if what == 'module' and self.env.config.automodule_skip_lines and docstrings: docstrings[0] = '\n'.join(docstrings[0].splitlines() [self.env.config.automodule_skip_lines:]) + # for classes, what the "docstring" is can be controlled via an option if what in ('class', 'exception'): content = self.env.config.autoclass_content @@ -231,24 +212,12 @@ class RstGenerator(object): docstrings.append(initdocstring) # the default is only the class docstring - # decode the docstrings using the module's source encoding - charset = None - module = getattr(obj, '__module__', None) - if module is not None: - charset = get_module_charset(module) + # make sure we get Unicode docstrings + return [force_decode(docstring, encoding) for docstring in docstrings] - for docstring in docstrings: - if isinstance(docstring, str): - if charset: - docstring = docstring.decode(charset) - else: - try: - # try decoding with utf-8, should only work for real UTF-8 - docstring = docstring.decode('utf-8') - except UnicodeError: - # last resort -- can't fail - docstring = docstring.decode('latin1') - docstringlines = prepare_docstring(docstring) + def process_doc(self, docstrings, what, name, obj): + """Let the user process the docstrings.""" + for docstringlines in docstrings: if self.env.app: # let extensions preprocess docstrings self.env.app.emit('autodoc-process-docstring', @@ -397,24 +366,25 @@ class RstGenerator(object): # now, import the module and get object to document try: - todoc = module = __import__(mod, None, None, ['foo']) - if hasattr(module, '__file__') and module.__file__: - modfile = module.__file__ - if modfile[-4:].lower() in ('.pyc', '.pyo'): - modfile 
= modfile[:-1] - self.filename_set.add(modfile) - else: - modfile = None # e.g. for builtin and C modules + __import__(mod) + todoc = module = sys.modules[mod] for part in objpath: todoc = getattr(todoc, part) - # also get a source code analyzer for attribute docs - analyzer = ModuleAnalyzer.for_module(mod) except (ImportError, AttributeError, PycodeError), err: self.warn('autodoc can\'t import/find %s %r, it reported error: "%s", ' 'please check your spelling and sys.path' % (what, str(fullname), err)) return + # try to also get a source code analyzer for attribute docs + try: + analyzer = ModuleAnalyzer.for_module(mod) + except PycodeError, err: + # no source file -- e.g. for builtin and C modules + analyzer = None + else: + self.filename_set.add(analyzer.srcname) + # check __module__ of object if wanted (for members not given explicitly) if check_module: if hasattr(todoc, '__module__'): @@ -473,23 +443,29 @@ class RstGenerator(object): if what != 'module': indent += u' ' - if modfile: - sourcename = '%s:docstring of %s' % (modfile, fullname) + # add content from attribute documentation + if analyzer: + sourcename = '%s:docstring of %s' % (analyzer.srcname, fullname) + attr_docs = analyzer.find_attr_docs() + if what in ('data', 'attribute'): + key = ('.'.join(objpath[:-1]), objpath[-1]) + if key in attr_docs: + no_docstring = True + docstrings = [attr_docs[key]] + for i, line in enumerate(self.process_doc(docstrings, what, + fullname, todoc)): + self.result.append(indent + line, sourcename, i) else: sourcename = 'docstring of %s' % fullname - - # add content from attribute documentation - attr_docs = analyzer.find_attr_docs() - if what in ('data', 'attribute'): - key = ('.'.join(objpath[:-1]), objpath[-1]) - if key in attr_docs: - no_docstring = True - for i, line in enumerate(attr_docs[key]): - self.result.append(indent + line, sourcename, i) + attr_docs = {} # add content from docstrings if not no_docstring: - for i, line in enumerate(self.get_doc(what, 
fullname, todoc)): + encoding = analyzer and analyzer.encoding + docstrings = map(prepare_docstring, + self.get_doc(what, todoc, encoding)) + for i, line in enumerate(self.process_doc(docstrings, what, + fullname, todoc)): self.result.append(indent + line, sourcename, i) # add source content, if present diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py index b851e4842..4aacf67bd 100644 --- a/sphinx/util/__init__.py +++ b/sphinx/util/__init__.py @@ -347,3 +347,17 @@ def parselinenos(spec, total): except Exception, err: raise ValueError('invalid line number spec: %r' % spec) return items + + +def force_decode(string, encoding): + if isinstance(string, str): + if encoding: + string = string.decode(encoding) + else: + try: + # try decoding with utf-8, should only work for real UTF-8 + string = string.decode('utf-8') + except UnicodeError: + # last resort -- can't fail + string = string.decode('latin1') + return string diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py index 837b5695b..dd9934f59 100644 --- a/tests/test_autodoc.py +++ b/tests/test_autodoc.py @@ -15,6 +15,7 @@ from util import * from docutils.statemachine import ViewList from sphinx.ext.autodoc import RstGenerator, cut_lines, between +from sphinx.util.docstrings import prepare_docstring def setup_module(): @@ -173,13 +174,14 @@ def test_format_signature(): def test_get_doc(): def getdocl(*args): - # strip the empty line at the end - return list(gen.get_doc(*args))[:-1] + ds = map(prepare_docstring, gen.get_doc(*args)) + # for testing purposes, concat them and strip the empty line at the end + return sum(ds, [])[:-1] # objects without docstring def f(): pass - assert getdocl('function', 'f', f) == [] + assert getdocl('function', f) == [] # standard function, diverse docstring styles... 
def f(): @@ -189,7 +191,7 @@ def test_get_doc(): Docstring """ for func in (f, g): - assert getdocl('function', 'f', func) == ['Docstring'] + assert getdocl('function', func) == ['Docstring'] # first line vs. other lines indentation def f(): @@ -198,17 +200,17 @@ def test_get_doc(): Other lines """ - assert getdocl('function', 'f', f) == ['First line', '', 'Other', ' lines'] + assert getdocl('function', f) == ['First line', '', 'Other', ' lines'] # charset guessing (this module is encoded in utf-8) def f(): """Döcstring""" - assert getdocl('function', 'f', f) == [u'Döcstring'] + assert getdocl('function', f) == [u'Döcstring'] # already-unicode docstrings must be taken literally def f(): u"""Döcstring""" - assert getdocl('function', 'f', f) == [u'Döcstring'] + assert getdocl('function', f) == [u'Döcstring'] # class docstring: depends on config value which one is taken class C: @@ -216,11 +218,11 @@ def test_get_doc(): def __init__(self): """Init docstring""" gen.env.config.autoclass_content = 'class' - assert getdocl('class', 'C', C) == ['Class docstring'] + assert getdocl('class', C) == ['Class docstring'] gen.env.config.autoclass_content = 'init' - assert getdocl('class', 'C', C) == ['Init docstring'] + assert getdocl('class', C) == ['Init docstring'] gen.env.config.autoclass_content = 'both' - assert getdocl('class', 'C', C) == ['Class docstring', '', 'Init docstring'] + assert getdocl('class', C) == ['Class docstring', '', 'Init docstring'] class D: """Class docstring""" @@ -232,18 +234,22 @@ def test_get_doc(): """ # Indentation is normalized for 'both' - assert getdocl('class', 'D', D) == ['Class docstring', '', 'Init docstring', - '', 'Other', ' lines'] + assert getdocl('class', D) == ['Class docstring', '', 'Init docstring', + '', 'Other', ' lines'] + + +def test_docstring_processing(): + def process(what, name, obj): + return list(gen.process_doc(map(prepare_docstring, gen.get_doc(what, obj)), + what, name, obj)) class E: def __init__(self): """Init 
docstring""" # docstring processing by event handler - assert getdocl('class', 'bar', E) == ['Init docstring', '', '42'] + assert process('class', 'bar', E) == ['Init docstring', '', '42', ''] - -def test_docstring_processing_functions(): lid = app.connect('autodoc-process-docstring', cut_lines(1, 1, ['function'])) def f(): """ @@ -251,7 +257,7 @@ def test_docstring_processing_functions(): second line third line """ - assert list(gen.get_doc('function', 'f', f)) == ['second line', ''] + assert process('function', 'f', f) == ['second line', ''] app.disconnect(lid) lid = app.connect('autodoc-process-docstring', between('---', ['function'])) @@ -263,7 +269,7 @@ def test_docstring_processing_functions(): --- third line """ - assert list(gen.get_doc('function', 'f', f)) == ['second line', ''] + assert process('function', 'f', f) == ['second line', ''] app.disconnect(lid) @@ -289,7 +295,7 @@ def test_generate(): def assert_result_contains(item, *args): gen.generate(*args) - print '\n'.join(gen.result) + #print '\n'.join(gen.result) assert len(gen.warnings) == 0, gen.warnings assert item in gen.result del gen.result[:] @@ -325,7 +331,10 @@ def test_generate(): assert_processes(should, 'class', 'Class', [], None) should.extend([('method', 'test_autodoc.Class.meth')]) assert_processes(should, 'class', 'Class', ['meth'], None) - should.extend([('attribute', 'test_autodoc.Class.prop')]) + should.extend([('attribute', 'test_autodoc.Class.prop'), + ('attribute', 'test_autodoc.Class.attr'), + ('attribute', 'test_autodoc.Class.docattr'), + ('attribute', 'test_autodoc.Class.udocattr')]) assert_processes(should, 'class', 'Class', ['__all__'], None) options.undoc_members = True should.append(('method', 'test_autodoc.Class.undocmeth')) @@ -369,6 +378,11 @@ def test_generate(): ('method', 'test_autodoc.Outer.Inner.meth')], 'class', 'Outer', ['__all__'], None) + # test generation for C modules (which have no source file) + gen.env.currmodule = 'time' + assert_processes([('function', 
'time.asctime')], 'function', 'asctime', [], None) + assert_processes([('function', 'time.asctime')], 'function', 'asctime', [], None) + # --- generate fodder ------------ @@ -398,10 +412,22 @@ class Class(Base): """Method that should be skipped.""" pass + # should not be documented + skipattr = 'foo' + + #: should be documented -- süß + attr = 'bar' + @property def prop(self): """Property.""" + docattr = 'baz' + """should likewise be documented -- süß""" + + udocattr = 'quux' + u"""should be documented as well - süß""" + class CustomDict(dict): """Docstring.""" @@ -421,4 +447,5 @@ class Outer(object): def meth(self): """Foo""" + # should be documented as an alias factory = dict From 04689accd072bc47aadecbd4db52af075477ff3b Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 20:43:15 +0100 Subject: [PATCH 28/29] Small API change. --- sphinx/ext/autodoc.py | 8 ++++---- tests/test_autodoc.py | 6 ++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 60ebd5b5e..35d96bda9 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -212,8 +212,9 @@ class RstGenerator(object): docstrings.append(initdocstring) # the default is only the class docstring - # make sure we get Unicode docstrings - return [force_decode(docstring, encoding) for docstring in docstrings] + # make sure we have Unicode docstrings, then sanitize and split into lines + return [prepare_docstring(force_decode(docstring, encoding)) + for docstring in docstrings] def process_doc(self, docstrings, what, name, obj): """Let the user process the docstrings.""" @@ -462,8 +463,7 @@ class RstGenerator(object): # add content from docstrings if not no_docstring: encoding = analyzer and analyzer.encoding - docstrings = map(prepare_docstring, - self.get_doc(what, todoc, encoding)) + docstrings = self.get_doc(what, todoc, encoding) for i, line in enumerate(self.process_doc(docstrings, what, fullname, todoc)): 
self.result.append(indent + line, sourcename, i) diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py index dd9934f59..67220180a 100644 --- a/tests/test_autodoc.py +++ b/tests/test_autodoc.py @@ -15,7 +15,6 @@ from util import * from docutils.statemachine import ViewList from sphinx.ext.autodoc import RstGenerator, cut_lines, between -from sphinx.util.docstrings import prepare_docstring def setup_module(): @@ -174,7 +173,7 @@ def test_format_signature(): def test_get_doc(): def getdocl(*args): - ds = map(prepare_docstring, gen.get_doc(*args)) + ds = gen.get_doc(*args) # for testing purposes, concat them and strip the empty line at the end return sum(ds, [])[:-1] @@ -240,8 +239,7 @@ def test_get_doc(): def test_docstring_processing(): def process(what, name, obj): - return list(gen.process_doc(map(prepare_docstring, gen.get_doc(what, obj)), - what, name, obj)) + return list(gen.process_doc(gen.get_doc(what, obj), what, name, obj)) class E: def __init__(self): From 088d63648722b8a464aebb7b73a1157a1f066035 Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Sun, 4 Jan 2009 20:52:35 +0100 Subject: [PATCH 29/29] A few more fixes in autodoc. --- sphinx/ext/autodoc.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py index 35d96bda9..be3e3f0ff 100644 --- a/sphinx/ext/autodoc.py +++ b/sphinx/ext/autodoc.py @@ -371,7 +371,7 @@ class RstGenerator(object): todoc = module = sys.modules[mod] for part in objpath: todoc = getattr(todoc, part) - except (ImportError, AttributeError, PycodeError), err: + except (ImportError, AttributeError), err: self.warn('autodoc can\'t import/find %s %r, it reported error: "%s", ' 'please check your spelling and sys.path' % (what, str(fullname), err)) @@ -392,6 +392,11 @@ class RstGenerator(object): if todoc.__module__ != mod: return + # make sure that the result starts with an empty line. 
This is + # necessary for some situations where another directive preprocesses + # reST and no starting newline is present + self.result.append(u'', '') + # format the object's signature, if any try: sig = self.format_signature(what, fullname, todoc, args, retann) @@ -400,11 +405,6 @@ class RstGenerator(object): (fullname, err)) sig = '' - # make sure that the result starts with an empty line. This is - # necessary for some situations where another directive preprocesses - # reST and no starting newline is present - self.result.append(u'', '') - # now, create the directive header if what == 'method': directive = get_method_type(todoc) @@ -430,13 +430,14 @@ class RstGenerator(object): self.result.append(indent + u' :noindex:', '') self.result.append(u'', '') + # add inheritance info, if wanted if self.options.show_inheritance and what in ('class', 'exception'): if len(todoc.__bases__): bases = [b.__module__ == '__builtin__' and u':class:`%s`' % b.__name__ or u':class:`%s.%s`' % (b.__module__, b.__name__) for b in todoc.__bases__] - self.result.append(indent + u' Bases: %s' % ', '.join(bases), + self.result.append(indent + _(u' Bases: %s') % ', '.join(bases), '') self.result.append(u'', '') @@ -468,7 +469,7 @@ class RstGenerator(object): fullname, todoc)): self.result.append(indent + line, sourcename, i) - # add source content, if present + # add additional content (e.g. 
from document), if present if add_content: for line, src in zip(add_content.data, add_content.items): self.result.append(indent + line, src[0], src[1]) @@ -483,10 +484,10 @@ class RstGenerator(object): if objpath: self.env.autodoc_current_class = objpath[0] - # add members, if possible - all_members = members == ['__all__'] + # look for members to include + want_all_members = members == ['__all__'] members_check_module = False - if all_members: + if want_all_members: # unqualified :members: given if what == 'module': if hasattr(todoc, '__all__'): @@ -524,7 +525,7 @@ class RstGenerator(object): # if content is not None, no extra content from docstrings will be added content = None - if all_members and membername.startswith('_'): + if want_all_members and membername.startswith('_'): # ignore members whose name starts with _ by default skip = True else: @@ -547,6 +548,7 @@ class RstGenerator(object): if skip: continue + # determine member type if what == 'module': if isinstance(member, (FunctionType, BuiltinFunctionType)): memberwhat = 'function' @@ -581,6 +583,7 @@ class RstGenerator(object): memberwhat = 'attribute' else: continue + # give explicitly separated module name, so that members of inner classes # can be documented full_membername = mod + '::' + '.'.join(objpath + [membername])