# Description: Fix the tokenizer so comment docstrings work with Python 2.6.
# Bug: https://sourceforge.net/tracker/index.php?func=detail&aid=2585292&group_id=32455&atid=405618
# Bug-Debian: http://bugs.debian.org/590112
# Origin: https://sourceforge.net/tracker/?func=detail&aid=2872545&group_id=32455&atid=405620
# Author: Andre Malo (ndparker)
# Reviewed-by: Kenneth J. Pronovici <pronovic@debian.org>
--- a/epydoc/docparser.py
+++ b/epydoc/docparser.py
@@ -72,6 +72,26 @@
 from epydoc.compat import *
 
 ######################################################################
+## Tokenizer change in 2.6
+######################################################################
+
+def comment_includes_nl():
+    """ Determine whether comments are parsed as one or two tokens... """
+    readline = iter(u'\n#\n\n'.splitlines(True)).next
+    tokens = [
+        token.tok_name[tup[0]] for tup in tokenize.generate_tokens(readline)
+    ]
+    if tokens == ['NL', 'COMMENT', 'NL', 'ENDMARKER']:
+        return True
+    elif tokens == ['NL', 'COMMENT', 'NL', 'NL', 'ENDMARKER']:
+        return False
+    raise AssertionError(
+ "Tokenizer returns unexexpected tokens: %r" % tokens
|
|
+    )
+
+comment_includes_nl = comment_includes_nl()
+
+######################################################################
 ## Doc Parser
 ######################################################################
 
@@ -520,6 +540,10 @@
     # inside that block, not outside it.
     start_group = None
 
+    # If the comment tokens do not include the NL, every comment token
+    # sets this to True in order to swallow the next NL token unprocessed.
+    comment_nl_waiting = False
+
     # Check if the source file declares an encoding.
     encoding = get_module_encoding(module_doc.filename)
 
@@ -570,7 +594,9 @@
         # then discard them: blank lines are not allowed between a
         # comment block and the thing it describes.
         elif toktype == tokenize.NL:
-            if comments and not line_toks:
+            if comment_nl_waiting:
+                comment_nl_waiting = False
+            elif comments and not line_toks:
                 log.warning('Ignoring docstring comment block followed by '
                             'a blank line in %r on line %r' %
                             (module_doc.filename, srow-1))
@@ -578,6 +604,7 @@
 
         # Comment token: add to comments if appropriate.
         elif toktype == tokenize.COMMENT:
+            comment_nl_waiting = not comment_includes_nl
            if toktext.startswith(COMMENT_DOCSTRING_MARKER):
                comment_line = toktext[len(COMMENT_DOCSTRING_MARKER):].rstrip()
                if comment_line.startswith(" "):
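
For reference, a minimal sketch (not part of the patch above) of the tokenizer difference that comment_includes_nl() detects: it prints the token stream for the same u'\n#\n\n' input under whichever Python 2 interpreter runs it. The two expected name sequences are exactly the ones the probe compares against; the snippet itself is only an assumed way to reproduce the check by hand.

    # Prints the token names the running interpreter's tokenize module
    # produces for a lone comment line, making the 2.5-vs-2.6 difference
    # visible directly.
    import token, tokenize
    from StringIO import StringIO

    for tup in tokenize.generate_tokens(StringIO(u'\n#\n\n').readline):
        print token.tok_name[tup[0]], repr(tup[1])

    # Older tokenizers fold the trailing newline into the COMMENT token:
    #   NL COMMENT NL ENDMARKER
    # Python 2.6 emits the newline as a separate NL token after COMMENT:
    #   NL COMMENT NL NL ENDMARKER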
|