From 58a12412510bb331b74beca9732b1ce10b1f0bc9 Mon Sep 17 00:00:00 2001
From: "Jason R. Coombs" <jaraco@jaraco.com>
Date: Fri, 27 Jun 2014 16:38:43 -0400
Subject: Move util scripts to docs

---
 docs/util/convert-trac.py   | 134 ++++++++++++++++++++++++++++++++++++++++++++
 docs/util/test-doc.py       |  23 ++++++++
 sphinx/util/convert-trac.py | 134 --------------------------------------------
 sphinx/util/test-doc.py     |  23 --------
 4 files changed, 157 insertions(+), 157 deletions(-)
 create mode 100644 docs/util/convert-trac.py
 create mode 100644 docs/util/test-doc.py
 delete mode 100644 sphinx/util/convert-trac.py
 delete mode 100644 sphinx/util/test-doc.py

diff --git a/docs/util/convert-trac.py b/docs/util/convert-trac.py
new file mode 100644
index 00000000..ebb77d42
--- /dev/null
+++ b/docs/util/convert-trac.py
@@ -0,0 +1,134 @@
+#!python
+
+"""
+%prog <filename>
+
+A utility script for performing some commonly-encountered patterns in
+Trac Wiki format into reStructuredText (rst).
+
+filename is the name of the text file to be saved. If -U is not used,
+the file is converted in-place and filename is also the name of the
+source.
+"""
+
+from __future__ import print_function
+import sys
+import re
+import inspect
+import optparse
+import shutil
+import urllib2
+from StringIO import StringIO
+
+
+def get_options():
+    global options
+    parser = optparse.OptionParser(usage=inspect.cleandoc(__doc__))
+    parser.add_option('-U', '--url',
+        help="Trac URL from which to retrieve source")
+    options, args = parser.parse_args()
+    try:
+        options.filename = args.pop()
+    except IndexError:
+        parser.error("Filename required")
+
+# each of the replacement functions should have a docstring
+# which is a regular expression to be matched.
+
+
+def replace_external_link(matcher):
+    r"\[(?P<href>(?P<scheme>\w+)\://.+?) (?P<name>.+?)\]"
+    return '`{name} <{href}>`_'.format(**matcher.groupdict())
+
+
+def replace_wiki_link(matcher):
+    r"\[wiki\:(?P<ref>.+?) (?P<name>.+?)\]"
+    return '`{name} <{ref}>`_'.format(
+        **matcher.groupdict()
+    )
+
+# character array indexed by level for characters
+heading_characters = [None, '*', '=', '-', '^']
+
+
+def replace_headings(matcher):
+    r"^(?P<level>=+) (?P<name>.*) (?P=level)$"
+    level = len(matcher.groupdict()['level'])
+    char = heading_characters[level]
+    name = matcher.groupdict()['name']
+    lines = [name, char * len(name)]
+    if level == 1:
+        lines.insert(0, char * len(name))
+    return '\n'.join(lines)
+
+
+def indent(block):
+    add_indent = lambda s: '    ' + s
+    lines = StringIO(block)
+    i_lines = map(add_indent, lines)
+    return ''.join(i_lines)
+
+
+def replace_inline_code(matcher):
+    r"\{\{\{(?P<code>[^\n]*?)\}\}\}"
+    return '``{code}``'.format(**matcher.groupdict())
+
+
+def replace_code_block(matcher):
+    r"\{\{\{\n(?P<code>(.|\n)*?)^\}\}\}"
+    return '::\n\n' + indent(matcher.groupdict()['code'])
+
+
+def replace_page_outline(matcher):
+    r"\[\[PageOutline\]\]\n"
+    return ''
+
+
+def replace_bang_symbols(matcher):
+    r"!(?P<symbol>\w+)"
+    return matcher.groupdict()['symbol']
+
+# a number of the files end in
+"""{{{
+#!html
+<pre class="wiki">
+
+Older versions
+</pre>
+}}}""" # and everything after is garbage, so just remove it. + + +def remove_2x_compat_notes(matcher): + r"\{\{\{\n#!html\n - -A utility script for performing some commonly-encountered patterns in -Trac Wiki format into reStructuredText (rst). - -filename is the name of the text file to be saved. If -U is not used, -the file is converted in-place and filename is also the name of the -source. -""" - -from __future__ import print_function -import sys -import re -import inspect -import optparse -import shutil -import urllib2 -from StringIO import StringIO - - -def get_options(): - global options - parser = optparse.OptionParser(usage=inspect.cleandoc(__doc__)) - parser.add_option('-U', '--url', - help="Trac URL from which to retrieve source") - options, args = parser.parse_args() - try: - options.filename = args.pop() - except IndexError: - parser.error("Filename required") - -# each of the replacement functions should have a docstring -# which is a regular expression to be matched. - - -def replace_external_link(matcher): - r"\[(?P(?P\w+)\://.+?) (?P.+?)\]" - return '`{name} <{href}>`_'.format(**matcher.groupdict()) - - -def replace_wiki_link(matcher): - r"\[wiki\:(?P.+?) (?P.+?)\]" - return '`{name} `_'.format( - **matcher.groupdict() - ) - -# character array indexed by level for characters -heading_characters = [None, '*', '=', '-', '^'] - - -def replace_headings(matcher): - r"^(?P=+) (?P.*) (?P=level)$" - level = len(matcher.groupdict()['level']) - char = heading_characters[level] - name = matcher.groupdict()['name'] - lines = [name, char * len(name)] - if level == 1: - lines.insert(0, char * len(name)) - return '\n'.join(lines) - - -def indent(block): - add_indent = lambda s: ' ' + s - lines = StringIO(block) - i_lines = map(add_indent, lines) - return ''.join(i_lines) - - -def replace_inline_code(matcher): - r"\{\{\{(?P[^\n]*?)\}\}\}" - return '``{code}``'.format(**matcher.groupdict()) - - -def replace_code_block(matcher): - r"\{\{\{\n(?P(.|\n)*?)^\}\}\}" - return '::\n\n' + indent(matcher.groupdict()['code']) - - -def replace_page_outline(matcher): - r"\[\[PageOutline\]\]\n" - return '' - - -def replace_bang_symbols(matcher): - r"!(?P\w+)" - return matcher.groupdict()['symbol'] - -# a number of the files end in -"""{{{ -#!html -

Older versions

-}}}""" # and everything after is garbage, so just remove it. - - -def remove_2x_compat_notes(matcher): - r"\{\{\{\n#!html\n