local copy of Docutils for PEP processing

David Goodger 2002-11-08 23:47:53 +00:00
parent 0986247194
commit 8453e310f7
56 changed files with 14059 additions and 0 deletions

1
docutils/.cvsignore Normal file

@ -0,0 +1 @@
*.pyc

130
docutils/__init__.py Normal file

@ -0,0 +1,130 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This is the Docutils (Python Documentation Utilities) package.
Package Structure
=================
Modules:
- __init__.py: Contains the package docstring only (this text).
- core.py: Contains the ``Publisher`` class and ``publish()`` convenience
function.
- frontend.py: Command-line and common processing for Docutils front-ends.
- io.py: Provides a uniform API for low-level input and output.
- nodes.py: Docutils document tree (doctree) node class library.
- optik.py: Option parsing and command-line help; from Greg Ward's
http://optik.sf.net/ project, included for convenience.
- roman.py: Conversion to and from Roman numerals. Courtesy of Mark
Pilgrim (http://diveintopython.org/).
- statemachine.py: A finite state machine specialized for
regular-expression-based text filters.
- urischemes.py: Contains a complete mapping of known URI addressing
scheme names to descriptions.
- utils.py: Contains the ``Reporter`` system warning class and miscellaneous
utilities.
Subpackages:
- languages: Language-specific mappings of terms.
- parsers: Syntax-specific input parser modules or packages.
- readers: Context-specific input handlers which understand the data
source and manage a parser.
- transforms: Modules used by readers and writers to modify DPS
doctrees.
- writers: Format-specific output translators.
"""
__docformat__ = 'reStructuredText'
__version__ = '0.2.8'
"""``major.minor.micro`` version number. The micro number is bumped any time
there's a change in the API incompatible with one of the front ends. The
minor number is bumped whenever there is a project release. The major number
will be bumped when the project is complete, and perhaps if there is a major
change in the design."""
class ApplicationError(StandardError): pass
class DataError(ApplicationError): pass
class SettingsSpec:
"""
Runtime setting specification base class.
SettingsSpec subclass objects are used by `docutils.frontend.OptionParser`.
"""
settings_spec = ()
"""Runtime settings specification. Override in subclasses.
Specifies runtime settings and associated command-line options, as used by
`docutils.frontend.OptionParser`. This tuple contains one or more sets of
option group title, description, and a list/tuple of tuples: ``('help
text', [list of option strings], {keyword arguments})``. Group title
and/or description may be `None`; no group title implies no group, just a
list of single options. Runtime settings names are derived implicitly
from long option names ("--a-setting" becomes ``settings.a_setting``) or
explicitly from the "destination" keyword argument."""
settings_default_overrides = None
"""A dictionary of auxiliary defaults, to override defaults for settings
defined in other components. Override in subclasses."""
relative_path_settings = ()
"""Settings containing filesystem paths. Override in subclasses.
Settings listed here are to be interpreted relative to the current working
directory."""
class TransformSpec:
"""
Runtime transform specification base class.
TransformSpec subclass objects are used by `docutils.transforms.Transformer`.
"""
default_transforms = ()
"""Transforms required by this class. Override in subclasses."""
class Component(SettingsSpec, TransformSpec):
"""Base class for Docutils components."""
component_type = None
"""Override in subclasses."""
supported = ()
"""Names for this component. Override in subclasses."""
def supports(self, format):
"""
Is `format` supported by this component?
To be used by transforms to ask the dependent component if it supports
a certain input context or output format.
"""
return format in self.supported
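
As an illustration of the ``settings_spec`` format described above, here is a
minimal sketch (not part of this commit; the component name and options are
hypothetical) of a component as consumed by `docutils.frontend.OptionParser`::

    from docutils import Component

    class ExampleComponent(Component):
        """Hypothetical component showing the settings_spec layout."""
        component_type = 'example'
        supported = ('example',)
        settings_spec = (
            'Example Component Options',        # option group title
            None,                               # group description
            (('Enable the hypothetical feature.',
              ['--example-feature'],
              {'action': 'store_true'}),
             ('Write auxiliary output to <file>.',
              ['--example-output'],
              {'metavar': '<file>'})))
        relative_path_settings = ('example_output',)

    ExampleComponent().supports('example')      # true

The runtime setting names are derived from the long options, so
``--example-output`` becomes ``settings.example_output``.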

325
docutils/core.py Normal file

@ -0,0 +1,325 @@
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Calling the ``publish_*`` convenience functions (or instantiating a
`Publisher` object) with component names will result in default
behavior. For custom behavior (setting component options), create
custom component objects first, and pass *them* to
``publish_*``/`Publisher`.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import Component
from docutils import frontend, io, readers, parsers, writers
from docutils.frontend import OptionParser, ConfigParser
class Publisher:
"""
A facade encapsulating the high-level logic of a Docutils system.
"""
def __init__(self, reader=None, parser=None, writer=None,
source=None, source_class=io.FileInput,
destination=None, destination_class=io.FileOutput,
settings=None):
"""
Initial setup. If any of `reader`, `parser`, or `writer` are not
specified, the corresponding ``set_...`` method should be called with
a component name (`set_reader` sets the parser as well).
"""
self.reader = reader
"""A `readers.Reader` instance."""
self.parser = parser
"""A `parsers.Parser` instance."""
self.writer = writer
"""A `writers.Writer` instance."""
self.source = source
"""The source of input data, an `io.Input` instance."""
self.source_class = source_class
"""The class for dynamically created source objects."""
self.destination = destination
"""The destination for docutils output, an `io.Output` instance."""
self.destination_class = destination_class
"""The class for dynamically created destination objects."""
self.settings = settings
"""An object containing Docutils settings as instance attributes.
Set by `self.process_command_line()` or `self.get_settings()`."""
def set_reader(self, reader_name, parser, parser_name):
"""Set `self.reader` by name."""
reader_class = readers.get_reader_class(reader_name)
self.reader = reader_class(parser, parser_name)
self.parser = self.reader.parser
def set_writer(self, writer_name):
"""Set `self.writer` by name."""
writer_class = writers.get_writer_class(writer_name)
self.writer = writer_class()
def set_components(self, reader_name, parser_name, writer_name):
if self.reader is None:
self.set_reader(reader_name, self.parser, parser_name)
if self.parser is None:
if self.reader.parser is None:
self.reader.set_parser(parser_name)
self.parser = self.reader.parser
if self.writer is None:
self.set_writer(writer_name)
def setup_option_parser(self, usage=None, description=None,
settings_spec=None, **defaults):
#@@@ Add self.source & self.destination to components in future?
option_parser = OptionParser(
components=(settings_spec, self.parser, self.reader, self.writer),
usage=usage, description=description)
config = ConfigParser()
config.read_standard_files()
config_settings = config.get_section('options')
frontend.make_paths_absolute(config_settings,
option_parser.relative_path_settings)
defaults.update(config_settings)
option_parser.set_defaults(**defaults)
return option_parser
def get_settings(self, usage=None, description=None,
settings_spec=None, **defaults):
"""
Set and return default settings (overrides in `defaults` keyword
argument).
Set components first (`self.set_reader` & `self.set_writer`).
Explicitly setting `self.settings` disables command line option
processing from `self.publish()`.
"""
option_parser = self.setup_option_parser(usage, description,
settings_spec, **defaults)
self.settings = option_parser.get_default_values()
return self.settings
def process_command_line(self, argv=None, usage=None, description=None,
settings_spec=None, **defaults):
"""
Pass an empty list to `argv` to avoid reading `sys.argv` (the
default).
Set components first (`self.set_reader` & `self.set_writer`).
"""
option_parser = self.setup_option_parser(usage, description,
settings_spec, **defaults)
if argv is None:
argv = sys.argv[1:]
self.settings = option_parser.parse_args(argv)
def set_io(self, source_path=None, destination_path=None):
if self.source is None:
self.set_source(source_path=source_path)
if self.destination is None:
self.set_destination(destination_path=destination_path)
def set_source(self, source=None, source_path=None):
if source_path is None:
source_path = self.settings._source
else:
self.settings._source = source_path
self.source = self.source_class(self.settings, source=source,
source_path=source_path)
def set_destination(self, destination=None, destination_path=None):
if destination_path is None:
destination_path = self.settings._destination
else:
self.settings._destination = destination_path
self.destination = self.destination_class(
self.settings, destination=destination,
destination_path=destination_path)
def apply_transforms(self, document):
document.transformer.populate_from_components(
(self.source, self.reader, self.reader.parser, self.writer,
self.destination))
document.transformer.apply_transforms()
def publish(self, argv=None, usage=None, description=None,
settings_spec=None, settings_overrides=None):
"""
Process command line options and arguments (if `self.settings` not
already set), run `self.reader` and then `self.writer`. Return
`self.writer`'s output.
"""
if self.settings is None:
self.process_command_line(argv, usage, description, settings_spec,
**(settings_overrides or {}))
elif settings_overrides:
self.settings._update(settings_overrides, 'loose')
self.set_io()
document = self.reader.read(self.source, self.parser, self.settings)
self.apply_transforms(document)
output = self.writer.write(document, self.destination)
if self.settings.dump_settings:
from pprint import pformat
print >>sys.stderr, '\n::: Runtime settings:'
print >>sys.stderr, pformat(self.settings.__dict__)
if self.settings.dump_internals:
from pprint import pformat
print >>sys.stderr, '\n::: Document internals:'
print >>sys.stderr, pformat(document.__dict__)
if self.settings.dump_transforms:
from pprint import pformat
print >>sys.stderr, '\n::: Transforms applied:'
print >>sys.stderr, pformat(document.transformer.applied)
if self.settings.dump_pseudo_xml:
print >>sys.stderr, '\n::: Pseudo-XML:'
print >>sys.stderr, document.pformat().encode(
'raw_unicode_escape')
return output
default_usage = '%prog [options] [<source> [<destination>]]'
default_description = ('Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout).')
def publish_cmdline(reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None, argv=None,
usage=default_usage, description=default_description):
"""
Set up & run a `Publisher`. For command-line front ends.
Parameters:
- `reader`: A `docutils.readers.Reader` object.
- `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
- `parser`: A `docutils.parsers.Parser` object.
- `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
- `writer`: A `docutils.writers.Writer` object.
- `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
- `settings`: Runtime settings object.
- `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
subclass. Used only if no `settings` specified.
- `settings_overrides`: A dictionary containing program-specific overrides
of component settings.
- `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
- `usage`: Usage string, output if there's a problem parsing the command
line.
- `description`: Program description, output for the "--help" option
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
pub.publish(argv, usage, description, settings_spec, settings_overrides)
def publish_file(source=None, source_path=None,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None):
"""
Set up & run a `Publisher`. For programmatic use with file-like I/O.
Parameters:
- `source`: A file-like object (must have "read" and "close" methods).
- `source_path`: Path to the input file. Opened if no `source` supplied.
If neither `source` nor `source_path` are supplied, `sys.stdin` is used.
- `destination`: A file-like object (must have "write" and "close"
methods).
- `destination_path`: Path to the output file. Opened if no `destination`
supplied. If neither `destination` nor `destination_path` are supplied,
`sys.stdout` is used.
- `reader`: A `docutils.readers.Reader` object.
- `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
- `parser`: A `docutils.parsers.Parser` object.
- `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
- `writer`: A `docutils.writers.Writer` object.
- `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
- `settings`: Runtime settings object.
- `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
subclass. Used only if no `settings` specified.
- `settings_overrides`: A dictionary containing program-specific overrides
of component settings.
"""
pub = Publisher(reader, parser, writer, settings=settings)
pub.set_components(reader_name, parser_name, writer_name)
if settings is None:
settings = pub.get_settings(settings_spec=settings_spec)
if settings_overrides:
settings._update(settings_overrides, 'loose')
pub.set_source(source, source_path)
pub.set_destination(destination, destination_path)
pub.publish()
def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None,
settings_overrides=None):
"""
Set up & run a `Publisher`, and return the string output.
For programmatic use with string I/O.
For encoded string output, be sure to set the "output_encoding" setting to
the desired encoding. Set it to "unicode" for unencoded Unicode string
output.
Parameters:
- `source`: An input string; required. This can be an encoded 8-bit
string (set the "input_encoding" setting to the correct encoding) or a
Unicode string (set the "input_encoding" setting to "unicode").
- `source_path`: Path to the file or object that produced `source`;
optional. Only used for diagnostic output.
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `reader`: A `docutils.readers.Reader` object.
- `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
- `parser`: A `docutils.parsers.Parser` object.
- `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
- `writer`: A `docutils.writers.Writer` object.
- `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
- `settings`: Runtime settings object.
- `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
subclass. Used only if no `settings` specified.
- `settings_overrides`: A dictionary containing program-specific overrides
of component settings.
"""
pub = Publisher(reader, parser, writer, settings=settings,
source_class=io.StringInput,
destination_class=io.StringOutput)
pub.set_components(reader_name, parser_name, writer_name)
if settings is None:
settings = pub.get_settings(settings_spec=settings_spec)
if settings_overrides:
settings._update(settings_overrides, 'loose')
pub.set_source(source, source_path)
pub.set_destination(destination_path=destination_path)
return pub.publish()
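
A minimal programmatic usage sketch of the convenience functions above (the
input text is a made-up example; the ``output_encoding`` override follows the
``publish_string`` docstring)::

    from docutils.core import publish_string

    source = 'A Title\n=======\n\nA paragraph with *emphasis*.\n'
    output = publish_string(source, writer_name='pseudoxml',
                            settings_overrides={'output_encoding': 'unicode'})
    print output

Command-line front ends call ``publish_cmdline()`` instead, which parses
``sys.argv`` via the option machinery in ``frontend.py``.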

335
docutils/frontend.py Normal file

@ -0,0 +1,335 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Command-line and common processing for Docutils front-end tools.
Exports the following classes:
- `OptionParser`: Standard Docutils command-line processing.
- `Values`: Runtime settings; objects are simple structs
(``object.attribute``).
- `ConfigParser`: Standard Docutils config file processing.
"""
__docformat__ = 'reStructuredText'
import os
import os.path
import ConfigParser as CP
import docutils
from docutils import optik
from docutils.optik import Values
def store_multiple(option, opt, value, parser, *args, **kwargs):
"""
Store multiple values in `parser.values`. (Option callback.)
Store `None` for each attribute named in `args`, and store the value for
each key (attribute name) in `kwargs`.
"""
for attribute in args:
setattr(parser.values, attribute, None)
for key, value in kwargs.items():
setattr(parser.values, key, value)
def read_config_file(option, opt, value, parser):
"""
Read a configuration file during option processing. (Option callback.)
"""
config_parser = ConfigParser()
config_parser.read(value)
settings = config_parser.get_section('options')
make_paths_absolute(settings, parser.relative_path_settings,
os.path.dirname(value))
parser.values.__dict__.update(settings)
def make_paths_absolute(pathdict, keys, base_path=None):
"""
Interpret filesystem path settings relative to the `base_path` given.
Paths are values in `pathdict` whose keys are in `keys`. Get `keys` from
`OptionParser.relative_path_settings`.
"""
if base_path is None:
base_path = os.getcwd()
for key in keys:
if pathdict.has_key(key) and pathdict[key]:
pathdict[key] = os.path.normpath(
os.path.abspath(os.path.join(base_path, pathdict[key])))
class OptionParser(optik.OptionParser, docutils.SettingsSpec):
"""
Parser for command-line and library use. The `settings_spec`
specification here and in other Docutils components are merged to build
the set of command-line options and runtime settings for this process.
Common settings (defined below) and component-specific settings must not
conflict. Short options are reserved for common settings, and components
are restricted to using long options.
"""
threshold_choices = 'info 1 warning 2 error 3 severe 4 none 5'.split()
"""Possible inputs for for --report and --halt threshold values."""
thresholds = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4, 'none': 5}
"""Lookup table for --report and --halt threshold values."""
settings_spec = (
'General Docutils Options',
None,
(('Include a "Generated by Docutils" credit and link at the end '
'of the document.',
['--generator', '-g'], {'action': 'store_true'}),
('Do not include a generator credit.',
['--no-generator'], {'action': 'store_false', 'dest': 'generator'}),
('Include the date at the end of the document (UTC).',
['--date', '-d'], {'action': 'store_const', 'const': '%Y-%m-%d',
'dest': 'datestamp'}),
('Include the time & date at the end of the document (UTC).',
['--time', '-t'], {'action': 'store_const',
'const': '%Y-%m-%d %H:%M UTC',
'dest': 'datestamp'}),
('Do not include a datestamp of any kind.',
['--no-datestamp'], {'action': 'store_const', 'const': None,
'dest': 'datestamp'}),
('Include a "View document source" link (relative to destination).',
['--source-link', '-s'], {'action': 'store_true'}),
('Use the supplied <URL> verbatim for a "View document source" '
'link; implies --source-link.',
['--source-url'], {'metavar': '<URL>'}),
('Do not include a "View document source" link.',
['--no-source-link'],
{'action': 'callback', 'callback': store_multiple,
'callback_args': ('source_link', 'source_url')}),
('Enable backlinks from section headers to table of contents '
'entries. This is the default.',
['--toc-entry-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'entry',
'default': 'entry'}),
('Enable backlinks from section headers to the top of the table of '
'contents.',
['--toc-top-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_const', 'const': 'top'}),
('Disable backlinks to the table of contents.',
['--no-toc-backlinks'],
{'dest': 'toc_backlinks', 'action': 'store_false'}),
('Enable backlinks from footnotes and citations to their '
'references. This is the default.',
['--footnote-backlinks'],
{'action': 'store_true', 'default': 1}),
('Disable backlinks from footnotes and citations.',
['--no-footnote-backlinks'],
{'dest': 'footnote_backlinks', 'action': 'store_false'}),
('Set verbosity threshold; report system messages at or higher than '
'<level> (by name or number: "info" or "1", warning/2, error/3, '
'severe/4; also, "none" or "5"). Default is 2 (warning).',
['--report', '-r'], {'choices': threshold_choices, 'default': 2,
'dest': 'report_level', 'metavar': '<level>'}),
('Report all system messages, info-level and higher. (Same as '
'"--report=info".)',
['--verbose', '-v'], {'action': 'store_const', 'const': 'info',
'dest': 'report_level'}),
('Do not report any system messages. (Same as "--report=none".)',
['--quiet', '-q'], {'action': 'store_const', 'const': 'none',
'dest': 'report_level'}),
('Set the threshold (<level>) at or above which system messages are '
'converted to exceptions, halting execution immediately. Levels '
'as in --report. Default is 4 (severe).',
['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
'default': 4, 'metavar': '<level>'}),
('Same as "--halt=info": halt processing at the slightest problem.',
['--strict'], {'action': 'store_const', 'const': 'info',
'dest': 'halt_level'}),
('Report debug-level system messages.',
['--debug'], {'action': 'store_true'}),
('Do not report debug-level system messages.',
['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
('Send the output of system messages (warnings) to <file>.',
['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
('Specify the encoding of input text. Default is locale-dependent.',
['--input-encoding', '-i'], {'metavar': '<name>'}),
('Specify the encoding for output. Default is UTF-8.',
['--output-encoding', '-o'],
{'metavar': '<name>', 'default': 'utf-8'}),
('Specify the language of input text (ISO 639 2-letter identifier).'
' Default is "en" (English).',
['--language', '-l'], {'dest': 'language_code', 'default': 'en',
'metavar': '<name>'}),
('Read configuration settings from <file>, if it exists.',
['--config'], {'metavar': '<file>', 'type': 'string',
'action': 'callback', 'callback': read_config_file}),
("Show this program's version number and exit.",
['--version', '-V'], {'action': 'version'}),
('Show this help message and exit.',
['--help', '-h'], {'action': 'help'}),
# Hidden options, for development use only:
(optik.SUPPRESS_HELP,
['--dump-settings'],
{'action': 'store_true'}),
(optik.SUPPRESS_HELP,
['--dump-internals'],
{'action': 'store_true'}),
(optik.SUPPRESS_HELP,
['--dump-transforms'],
{'action': 'store_true'}),
(optik.SUPPRESS_HELP,
['--dump-pseudo-xml'],
{'action': 'store_true'}),
(optik.SUPPRESS_HELP,
['--expose-internal-attribute'],
{'action': 'append', 'dest': 'expose_internals'}),))
"""Runtime settings and command-line options common to all Docutils front
ends. Setting specs specific to individual Docutils components are also
used (see `populate_from_components()`)."""
relative_path_settings = ('warning_stream',)
version_template = '%%prog (Docutils %s)' % docutils.__version__
"""Default version message."""
def __init__(self, components=(), *args, **kwargs):
"""
`components` is a list of Docutils components each containing a
``.settings_spec`` attribute. `defaults` is a mapping of setting
default overrides.
"""
optik.OptionParser.__init__(
self, help=None,
format=optik.Titled(),
# Needed when Optik is updated (replaces above 2 lines):
#self, add_help=None,
#formatter=optik.TitledHelpFormatter(width=78),
*args, **kwargs)
if not self.version:
self.version = self.version_template
# Internal settings with no defaults from settings specifications;
# initialize manually:
self.set_defaults(_source=None, _destination=None)
# Make an instance copy (it will be modified):
self.relative_path_settings = list(self.relative_path_settings)
self.populate_from_components(tuple(components) + (self,))
def populate_from_components(self, components):
for component in components:
if component is None:
continue
i = 0
settings_spec = component.settings_spec
self.relative_path_settings.extend(
component.relative_path_settings)
while i < len(settings_spec):
title, description, option_spec = settings_spec[i:i+3]
if title:
group = optik.OptionGroup(self, title, description)
self.add_option_group(group)
else:
group = self # single options
for (help_text, option_strings, kwargs) in option_spec:
group.add_option(help=help_text, *option_strings,
**kwargs)
i += 3
for component in components:
if component and component.settings_default_overrides:
self.defaults.update(component.settings_default_overrides)
def check_values(self, values, args):
if hasattr(values, 'report_level'):
values.report_level = self.check_threshold(values.report_level)
if hasattr(values, 'halt_level'):
values.halt_level = self.check_threshold(values.halt_level)
values._source, values._destination = self.check_args(args)
make_paths_absolute(values.__dict__, self.relative_path_settings,
os.getcwd())
return values
def check_threshold(self, level):
try:
return int(level)
except ValueError:
try:
return self.thresholds[level.lower()]
except (KeyError, AttributeError):
self.error('Unknown threshold: %r.' % level)
def check_args(self, args):
source = destination = None
if args:
source = args.pop(0)
if args:
destination = args.pop(0)
if args:
self.error('Maximum 2 arguments allowed.')
if source and source == destination:
self.error('Do not specify the same file for both source and '
'destination. It will clobber the source file.')
return source, destination
class ConfigParser(CP.ConfigParser):
standard_config_files = (
'/etc/docutils.conf', # system-wide
'./docutils.conf', # project-specific
os.path.expanduser('~/.docutils')) # user-specific
"""Docutils configuration files, using ConfigParser syntax (section
'options'). Later files override earlier ones."""
def read_standard_files(self):
self.read(self.standard_config_files)
def optionxform(self, optionstr):
"""
Transform '-' to '_' so the cmdline form of option names can be used.
"""
return optionstr.lower().replace('-', '_')
def get_section(self, section, raw=0, vars=None):
"""
Return a given section as a dictionary (empty if the section
doesn't exist).
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw` is true. Additional substitutions may be provided using the
`vars` argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
try:
sectdict = self._ConfigParser__sections[section].copy()
except KeyError:
sectdict = {}
d = self._ConfigParser__defaults.copy()
d.update(sectdict)
# Update with the entry specific variables
if vars:
d.update(vars)
if raw:
return sectdict
# do the string interpolation
for option in sectdict.keys():
rawval = sectdict[option]
value = rawval # Make it a pretty variable name
depth = 0
while depth < 10: # Loop through this until it's done
depth += 1
if value.find("%(") >= 0:
try:
value = value % d
except KeyError, key:
raise CP.InterpolationError(key, option, section,
rawval)
else:
break
if value.find("%(") >= 0:
raise CP.InterpolationDepthError(option, section, rawval)
sectdict[option] = value
return sectdict
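
A sketch of the configuration-file handling above (the file contents and
setting names shown are hypothetical examples; section and key syntax follow
``ConfigParser`` and ``optionxform``)::

    from docutils.frontend import ConfigParser, make_paths_absolute

    # A ./docutils.conf along these lines ('-' in keys becomes '_'):
    #
    #   [options]
    #   output-encoding: utf-8
    #   warning-stream: warnings.log

    config = ConfigParser()
    config.read('./docutils.conf')
    settings = config.get_section('options')
    # -> {'output_encoding': 'utf-8', 'warning_stream': 'warnings.log'}
    make_paths_absolute(settings, ('warning_stream',))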

271
docutils/io.py Normal file

@ -0,0 +1,271 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
I/O classes provide a uniform API for low-level input and output. Subclasses
will exist for a variety of input/output mechanisms.
"""
__docformat__ = 'reStructuredText'
import sys
import locale
from docutils import TransformSpec
class Input(TransformSpec):
"""
Abstract base class for input wrappers.
"""
component_type = 'input'
default_source_path = None
def __init__(self, settings, source=None, source_path=None):
self.settings = settings
"""A settings object with "input_encoding" and "output_encoding"
attributes (typically a `docutils.optik.Values` object)."""
self.source = source
"""The source of input data."""
self.source_path = source_path
"""A text reference to the source."""
if not source_path:
self.source_path = self.default_source_path
def __repr__(self):
return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
self.source_path)
def read(self, reader):
raise NotImplementedError
def decode(self, data):
"""
Decode a string, `data`, heuristically.
Raise UnicodeError if unsuccessful.
The client application should call ``locale.setlocale`` at the
beginning of processing::
locale.setlocale(locale.LC_ALL, '')
"""
if self.settings.input_encoding \
and self.settings.input_encoding.lower() == 'unicode':
return unicode(data)
encodings = [self.settings.input_encoding, 'utf-8']
try:
encodings.append(locale.nl_langinfo(locale.CODESET))
except:
pass
try:
encodings.append(locale.getlocale()[1])
except:
pass
try:
encodings.append(locale.getdefaultlocale()[1])
except:
pass
encodings.append('latin-1')
for enc in encodings:
if not enc:
continue
try:
decoded = unicode(data, enc)
return decoded
except (UnicodeError, LookupError):
pass
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: %s.'
% ', '.join([repr(enc) for enc in encodings if enc]))
class Output(TransformSpec):
"""
Abstract base class for output wrappers.
"""
component_type = 'output'
default_destination_path = None
def __init__(self, settings, destination=None, destination_path=None):
self.settings = settings
"""A settings object with "input_encoding" and "output_encoding"
attributes (typically a `docutils.optik.Values` object)."""
self.destination = destination
"""The destination for output data."""
self.destination_path = destination_path
"""A text reference to the destination."""
if not destination_path:
self.destination_path = self.default_destination_path
def __repr__(self):
return ('%s: destination=%r, destination_path=%r'
% (self.__class__, self.destination, self.destination_path))
def write(self, data):
raise NotImplementedError
def encode(self, data):
if self.settings.output_encoding \
and self.settings.output_encoding.lower() == 'unicode':
return data
else:
return data.encode(self.settings.output_encoding or '')
class FileInput(Input):
"""
Input for single, simple file-like objects.
"""
def __init__(self, settings, source=None, source_path=None, autoclose=1):
"""
:Parameters:
- `source`: either a file-like object (which is read directly), or
`None` (which implies `sys.stdin` if no `source_path` given).
- `source_path`: a path to a file, which is opened and then read.
- `autoclose`: close automatically after read (boolean); always
false if `sys.stdin` is the source.
"""
Input.__init__(self, settings, source, source_path)
self.autoclose = autoclose
if source is None:
if source_path:
self.source = open(source_path)
else:
self.source = sys.stdin
self.autoclose = None
if not source_path:
try:
self.source_path = self.source.name
except AttributeError:
pass
def read(self, reader):
"""Read and decode a single file and return the data."""
data = self.source.read()
if self.autoclose:
self.close()
return self.decode(data)
def close(self):
self.source.close()
class FileOutput(Output):
"""
Output for single, simple file-like objects.
"""
def __init__(self, settings, destination=None, destination_path=None,
autoclose=1):
"""
:Parameters:
- `destination`: either a file-like object (which is written
directly) or `None` (which implies `sys.stdout` if no
`destination_path` given).
- `destination_path`: a path to a file, which is opened and then
written.
- `autoclose`: close automatically after write (boolean); always
false if `sys.stdout` is the destination.
"""
Output.__init__(self, settings, destination, destination_path)
self.opened = 1
self.autoclose = autoclose
if destination is None:
if destination_path:
self.opened = None
else:
self.destination = sys.stdout
self.autoclose = None
if not destination_path:
try:
self.destination_path = self.destination.name
except AttributeError:
pass
def open(self):
self.destination = open(self.destination_path, 'w')
self.opened = 1
def write(self, data):
"""Encode `data`, write it to a single file, and return it."""
output = self.encode(data)
if not self.opened:
self.open()
self.destination.write(output)
if self.autoclose:
self.close()
return output
def close(self):
self.destination.close()
self.opened = None
class StringInput(Input):
"""
Direct string input.
"""
default_source_path = '<string>'
def read(self, reader):
"""Decode and return the source string."""
return self.decode(self.source)
class StringOutput(Output):
"""
Direct string output.
"""
default_destination_path = '<string>'
def write(self, data):
"""Encode `data`, store it in `self.destination`, and return it."""
self.destination = self.encode(data)
return self.destination
class NullInput(Input):
"""
Degenerate input: read nothing.
"""
default_source_path = 'null input'
def read(self, reader):
"""Return a null string."""
return u''
class NullOutput(Output):
"""
Degenerate output: write nothing.
"""
default_destination_path = 'null output'
def write(self, data):
"""Do nothing ([don't even] send data to the bit bucket)."""
pass
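
A sketch of the decode/encode round trip implemented above (the bare
``Settings`` class stands in for the ``optik.Values`` object normally supplied
by the front end)::

    from docutils import io

    class Settings:                       # stand-in for docutils.optik.Values
        input_encoding = None             # try utf-8, locale, then latin-1
        output_encoding = 'utf-8'

    settings = Settings()
    source = io.StringInput(settings, source='caf\xc3\xa9')   # UTF-8 bytes
    text = source.read(reader=None)       # -> u'caf\xe9'
    destination = io.StringOutput(settings)
    destination.write(text)               # -> 'caf\xc3\xa9', re-encoded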

1
docutils/languages/.cvsignore Normal file

@ -0,0 +1 @@
*.pyc

20
docutils/languages/__init__.py Normal file

@ -0,0 +1,20 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This package contains modules for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
_languages = {}
def get_language(language_code):
if _languages.has_key(language_code):
return _languages[language_code]
module = __import__(language_code, globals(), locals())
_languages[language_code] = module
return module
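
A short usage sketch of ``get_language()``; the values shown come from the
German module (``de.py``) added below::

    from docutils import languages

    german = languages.get_language('de')
    german.labels['note']                    # -> 'Bemerkung'
    german.bibliographic_fields['autor']     # -> docutils.nodes.author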

59
docutils/languages/de.py Normal file

@ -0,0 +1,59 @@
# Authors: David Goodger; Gunnar Schwant
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
German language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': 'Autor',
'authors': 'Autoren',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Kontakt',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Datum',
'dedication': 'Widmung',
'copyright': 'Copyright',
'abstract': 'Zusammenfassung',
'attention': 'Achtung!',
'caution': 'Vorsicht!',
'danger': '!GEFAHR!',
'error': 'Fehler',
'hint': 'Hinweis',
'important': 'Wichtig',
'note': 'Bemerkung',
'tip': 'Tipp',
'warning': 'Warnung',
'contents': 'Inhalt'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
'autor': nodes.author,
'autoren': nodes.authors,
'organisation': nodes.organization,
'adresse': nodes.address,
'kontakt': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'datum': nodes.date,
'copyright': nodes.copyright,
'widmung': nodes.topic,
'zusammenfassung': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""

60
docutils/languages/en.py Normal file

@ -0,0 +1,60 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
English-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': 'Author',
'authors': 'Authors',
'organization': 'Organization',
'address': 'Address',
'contact': 'Contact',
'version': 'Version',
'revision': 'Revision',
'status': 'Status',
'date': 'Date',
'copyright': 'Copyright',
'dedication': 'Dedication',
'abstract': 'Abstract',
'attention': 'Attention!',
'caution': 'Caution!',
'danger': '!DANGER!',
'error': 'Error',
'hint': 'Hint',
'important': 'Important',
'note': 'Note',
'tip': 'Tip',
'warning': 'Warning',
'contents': 'Contents'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
'author': nodes.author,
'authors': nodes.authors,
'organization': nodes.organization,
'address': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
'revision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
'dedication': nodes.topic,
'abstract': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""

60
docutils/languages/fr.py Normal file

@ -0,0 +1,60 @@
# Author: Stefane Fermigier
# Contact: sf@fermigier.com
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
French-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': 'Auteur',
'authors': 'Auteurs',
'organization': 'Organisation',
'address': 'Adresse',
'contact': 'Contact',
'version': 'Version',
'revision': u'R\u00e9vision',
'status': 'Statut',
'date': 'Date',
'copyright': 'Copyright',
'dedication': u'D\u00e9dicace',
'abstract': u'R\u00e9sum\u00e9',
'attention': 'Attention!',
'caution': 'Avertissement!',
'danger': '!DANGER!',
'error': 'Erreur',
'hint': 'Indication',
'important': 'Important',
'note': 'Note',
'tip': 'Astuce',
'warning': 'Avertissement',
'contents': 'Contenu'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
'auteur': nodes.author,
'auteurs': nodes.authors,
'organisation': nodes.organization,
'adresse': nodes.address,
'contact': nodes.contact,
'version': nodes.version,
u'r\u00e9vision': nodes.revision,
'status': nodes.status,
'date': nodes.date,
'copyright': nodes.copyright,
u'd\u00e9dicace': nodes.topic,
u'r\u00e9sum\u00e9': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""

60
docutils/languages/sk.py Normal file

@ -0,0 +1,60 @@
# :Author: Miroslav Vasko
# :Contact: zemiak@zoznam.sk
# :Revision: $Revision$
# :Date: $Date$
# :Copyright: This module has been placed in the public domain.
"""
Slovak-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': u'Autor',
'authors': u'Autori',
'organization': u'Organiz\u00E1cia',
'address': u'Adresa',
'contact': u'Kontakt',
'version': u'Verzia',
'revision': u'Rev\u00EDzia',
'status': u'Stav',
'date': u'D\u00E1tum',
'copyright': u'Copyright',
'dedication': u'Venovanie',
'abstract': u'Abstraktne',
'attention': u'Pozor!',
'caution': u'Opatrne!',
'danger': u'!NEBEZPE\u010cENSTVO!',
'error': u'Chyba',
'hint': u'Rada',
'important': u'D\u00F4le\u017Eit\u00E9',
'note': u'Pozn\u00E1mka',
'tip': u'Tip',
'warning': u'Varovanie',
'contents': u'Obsah'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
u'autor': nodes.author,
u'autori': nodes.authors,
u'organiz\u00E1cia': nodes.organization,
u'adresa': nodes.address,
u'kontakt': nodes.contact,
u'verzia': nodes.version,
u'rev\u00EDzia': nodes.revision,
u'stav': nodes.status,
u'd\u00E1tum': nodes.date,
u'copyright': nodes.copyright,
u'venovanie': nodes.topic,
u'abstraktne': nodes.topic}
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""

60
docutils/languages/sv.py Normal file

@ -0,0 +1,60 @@
# Author: Adam Chodorowski
# Contact: chodorowski@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Swedish language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
labels = {
'author': u'F\u00f6rfattare',
'authors': u'F\u00f6rfattare',
'organization': u'Organisation',
'address': u'Adress',
'contact': u'Kontakt',
'version': u'Version',
'revision': u'Revision',
'status': u'Status',
'date': u'Datum',
'copyright': u'Copyright',
'dedication': u'Dedikation',
'abstract': u'Sammanfattning',
'attention': u'Observera!',
'caution': u'Varning!',
'danger': u'FARA!',
'error': u'Fel',
'hint': u'V\u00e4gledning',
'important': u'Viktigt',
'note': u'Notera',
'tip': u'Tips',
'warning': u'Varning',
'contents': u'Inneh\u00e5ll' }
"""Mapping of node class name to label text."""
bibliographic_fields = {
# 'Author' and 'Authors' identical in Swedish; assume the plural:
u'f\u00f6rfattare': nodes.authors,
u'organisation': nodes.organization,
u'adress': nodes.address,
u'kontakt': nodes.contact,
u'version': nodes.version,
u'revision': nodes.revision,
u'status': nodes.status,
u'datum': nodes.date,
u'copyright': nodes.copyright,
u'dedikation': nodes.topic,
u'sammanfattning': nodes.topic }
"""Field name (lowcased) to node class name mapping for bibliographic fields
(field_list)."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""

1433
docutils/nodes.py Normal file

File diff suppressed because it is too large

1354
docutils/optik.py Normal file

File diff suppressed because it is too large

1
docutils/parsers/.cvsignore Normal file

@ -0,0 +1 @@
*.pyc

48
docutils/parsers/__init__.py Normal file

@ -0,0 +1,48 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils parser modules.
"""
__docformat__ = 'reStructuredText'
from docutils import Component
class Parser(Component):
component_type = 'parser'
def parse(self, inputstring, document):
"""Override to parse `inputstring` into document tree `document`."""
raise NotImplementedError('subclass must override this method')
def setup_parse(self, inputstring, document):
"""Initial parse setup. Call at start of `self.parse()`."""
self.inputstring = inputstring
self.document = document
document.reporter.attach_observer(document.note_parse_message)
def finish_parse(self):
"""Finalize parse details. Call at end of `self.parse()`."""
self.document.reporter.detach_observer(
self.document.note_parse_message)
_parser_aliases = {
'restructuredtext': 'rst',
'rest': 'rst',
'restx': 'rst',
'rtxt': 'rst',}
def get_parser_class(parser_name):
"""Return the Parser class from the `parser_name` module."""
parser_name = parser_name.lower()
if _parser_aliases.has_key(parser_name):
parser_name = _parser_aliases[parser_name]
module = __import__(parser_name, globals(), locals())
return module.Parser
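
A usage sketch for ``get_parser_class()``, with alias resolution as defined
above::

    from docutils import parsers

    parser_class = parsers.get_parser_class('restructuredtext')  # alias -> 'rst'
    parser = parser_class()
    parser.supports('rest')                  # true; see the rst package below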

1
docutils/parsers/rst/.cvsignore Normal file

@ -0,0 +1 @@
*.pyc

93
docutils/parsers/rst/__init__.py Normal file

@ -0,0 +1,93 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This is the ``docutils.parsers.rst`` package. It exports a single class, `Parser`,
the reStructuredText parser.
Usage
=====
1. Create a parser::
parser = docutils.parsers.rst.Parser()
Several optional arguments may be passed to modify the parser's behavior.
Please see `docutils.parsers.Parser` for details.
2. Gather input (a multi-line string), by reading a file or the standard
input::
input = sys.stdin.read()
3. Create a new empty `docutils.nodes.document` tree::
document = docutils.utils.new_document(source)
See `docutils.utils.new_document()` for parameter details.
4. Run the parser, populating the document tree::
parser.parse(input, document)
Parser Overview
===============
The reStructuredText parser is implemented as a state machine, examining its
input one line at a time. To understand how the parser works, please first
become familiar with the `docutils.statemachine` module, then see the
`states` module.
"""
__docformat__ = 'reStructuredText'
import docutils.parsers
import docutils.statemachine
from docutils.parsers.rst import states
class Parser(docutils.parsers.Parser):
"""The reStructuredText parser."""
supported = ('restructuredtext', 'rst', 'rest', 'restx', 'rtxt', 'rstx')
"""Aliases this parser supports."""
settings_spec = (
'reStructuredText Parser Options',
None,
(('Recognize and link to PEP references (like "PEP 258").',
['--pep-references'],
{'action': 'store_true'}),
('Recognize and link to RFC references (like "RFC 822").',
['--rfc-references'],
{'action': 'store_true'}),
('Set number of spaces for tab expansion (default 8).',
['--tab-width'],
{'metavar': '<width>', 'type': 'int', 'default': 8}),))
def __init__(self, rfc2822=None, inliner=None):
if rfc2822:
self.initial_state = 'RFC2822Body'
else:
self.initial_state = 'Body'
self.state_classes = states.state_classes
self.inliner = inliner
def parse(self, inputstring, document):
"""Parse `inputstring` and populate `document`, a document tree."""
self.setup_parse(inputstring, document)
debug = document.reporter[''].debug
self.statemachine = states.RSTStateMachine(
state_classes=self.state_classes,
initial_state=self.initial_state,
debug=debug)
inputlines = docutils.statemachine.string2lines(
inputstring, tab_width=document.settings.tab_width,
convert_whitespace=1)
self.statemachine.run(inputlines, document, inliner=self.inliner)
self.finish_parse()
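
The usage steps from the docstring above, gathered into one sketch.  Note that
``new_document()`` lives in ``docutils/utils.py``, which is part of this commit
but not shown in this hunk, so its exact parameters are not reproduced here::

    import sys
    import docutils.parsers.rst
    import docutils.utils

    parser = docutils.parsers.rst.Parser()
    input = sys.stdin.read()
    # new_document() may also require a runtime settings object;
    # see docutils/utils.py for the parameter details.
    document = docutils.utils.new_document('<stdin>')
    parser.parse(input, document)
    print document.pformat()                 # pseudo-XML view of the doctree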

1
docutils/parsers/rst/directives/.cvsignore Normal file

@ -0,0 +1 @@
*.pyc

227
docutils/parsers/rst/directives/__init__.py Normal file

@ -0,0 +1,227 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This package contains directive implementation modules.
The interface for directive functions is as follows::
def directive_fn(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
code...
# Set function attributes:
directive_fn.arguments = ...
directive_fn.options = ...
directive_fn.content = ...
Parameters:
- ``name`` is the directive type or name.
- ``arguments`` is a list of positional arguments.
- ``options`` is a dictionary mapping option names to values.
- ``content`` is a list of strings, the directive content.
- ``lineno`` is the line number of the first line of the directive.
- ``content_offset`` is the line offset of the first line of the content from
the beginning of the current input. Used when initiating a nested parse.
- ``block_text`` is a string containing the entire directive. Include it as
the content of a literal block in a system message if there is a problem.
- ``state`` is the state which called the directive function.
- ``state_machine`` is the state machine which controls the state which called
the directive function.
Function attributes, interpreted by the directive parser (which calls the
directive function):
- ``arguments``: A 3-tuple specifying the expected positional arguments, or
``None`` if the directive has no arguments. The 3 items in the tuple are
``(required, optional, whitespace OK in last argument)``:
1. The number of required arguments.
2. The number of optional arguments.
3. A boolean, indicating if the final argument may contain whitespace.
Arguments are normally single whitespace-separated words. The final
argument may contain whitespace if the third item in the argument spec tuple
is 1/True. If the form of the arguments is more complex, specify only one
argument (either required or optional) and indicate that final whitespace is
OK; the client code must do any context-sensitive parsing.
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Directive functions return a list of nodes which will be inserted into the
document tree at the point where the directive was encountered (can be an
empty list).
See `Creating reStructuredText Directives`_ for more information.
.. _Creating reStructuredText Directives:
http://docutils.sourceforge.net/spec/howto/rst-directives.html
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.parsers.rst.languages import en as _fallback_language_module
_directive_registry = {
'attention': ('admonitions', 'attention'),
'caution': ('admonitions', 'caution'),
'danger': ('admonitions', 'danger'),
'error': ('admonitions', 'error'),
'important': ('admonitions', 'important'),
'note': ('admonitions', 'note'),
'tip': ('admonitions', 'tip'),
'hint': ('admonitions', 'hint'),
'warning': ('admonitions', 'warning'),
'topic': ('body', 'topic'),
'line-block': ('body', 'line_block'),
'parsed-literal': ('body', 'parsed_literal'),
#'questions': ('body', 'question_list'),
'image': ('images', 'image'),
'figure': ('images', 'figure'),
'contents': ('parts', 'contents'),
'sectnum': ('parts', 'sectnum'),
#'footnotes': ('parts', 'footnotes'),
#'citations': ('parts', 'citations'),
'target-notes': ('references', 'target_notes'),
'meta': ('html', 'meta'),
#'imagemap': ('html', 'imagemap'),
'raw': ('misc', 'raw'),
'include': ('misc', 'include'),
'replace': ('misc', 'replace'),
'restructuredtext-test-directive': ('misc', 'directive_test_function'),}
"""Mapping of directive name to (module name, function name). The directive
name is canonical & must be lowercase. Language-dependent names are defined
in the ``languages`` subpackage."""
_modules = {}
"""Cache of imported directive modules."""
_directives = {}
"""Cache of imported directive functions."""
def directive(directive_name, language_module, document):
"""
Locate and return a directive function from its language-dependent name.
If not found in the current language, check English.
"""
normname = directive_name.lower()
messages = []
if _directives.has_key(normname):
return _directives[normname], messages
try:
canonicalname = language_module.directives[normname]
except (KeyError, AttributeError):
msg_text = ('No directive entry for "%s" in module "%s".'
% (directive_name, language_module.__name__))
try:
canonicalname = _fallback_language_module.directives[normname]
msg_text += ('\nUsing English fallback for directive "%s".'
% directive_name)
except KeyError:
msg_text += ('\nTrying "%s" as canonical directive name.'
% directive_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
warning = document.reporter.warning(
msg_text, line=document.current_line)
messages.append(warning)
try:
modulename, functionname = _directive_registry[canonicalname]
except KeyError:
return None, messages
if _modules.has_key(modulename):
module = _modules[modulename]
else:
try:
module = __import__(modulename, globals(), locals())
except ImportError:
return None, messages
try:
function = getattr(module, functionname)
_directives[normname] = function
except AttributeError:
return None, messages
return function, messages
def flag(argument):
"""
Check for a valid flag option (no argument) and return ``None``.
Raise ``ValueError`` if an argument is found.
"""
if argument and argument.strip():
raise ValueError('no argument is allowed; "%s" supplied' % argument)
else:
return None
def unchanged(argument):
"""
Return the argument, unchanged.
Raise ``ValueError`` if no argument is found.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
return argument # unchanged!
def path(argument):
"""
Return the path argument unwrapped (with newlines removed).
Raise ``ValueError`` if no argument is found or if the path contains
internal whitespace.
"""
if argument is None:
raise ValueError('argument required but none supplied')
else:
path = ''.join([s.strip() for s in argument.splitlines()])
if path.find(' ') == -1:
return path
else:
raise ValueError('path contains whitespace')
def nonnegative_int(argument):
"""
Check for a nonnegative integer argument; raise ``ValueError`` if not.
"""
value = int(argument)
if value < 0:
raise ValueError('negative value; must be positive or zero')
return value
def format_values(values):
return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]),
values[-1])
def choice(argument, values):
try:
value = argument.lower().strip()
except AttributeError:
raise ValueError('must supply an argument; choose from %s'
% format_values(values))
if value in values:
return value
else:
raise ValueError('"%s" unknown; choose from %s'
% (argument, format_values(values)))
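
A minimal sketch of a directive function following the interface documented
above.  The ``highlights`` name is hypothetical and its registration is not
shown; real directives are wired up through ``_directive_registry`` and the
language mappings::

    from docutils import nodes
    from docutils.parsers.rst import directives

    def highlights(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
        """Wrap the directive content in a topic node."""
        if not content:
            error = state_machine.reporter.error(
                'The "%s" directive requires content.' % name,
                nodes.literal_block(block_text, block_text), line=lineno)
            return [error]
        node = nodes.topic('\n'.join(content))
        state.nested_parse(content, content_offset, node)
        return [node]

    highlights.arguments = None                       # no arguments
    highlights.options = {'class': directives.unchanged}
    highlights.content = 1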

74
docutils/parsers/rst/directives/admonitions.py Normal file

@ -0,0 +1,74 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import states
from docutils import nodes
def admonition(node_class, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
text = '\n'.join(content)
admonition_node = node_class(text)
if text:
state.nested_parse(content, content_offset, admonition_node)
return [admonition_node]
else:
error = state_machine.reporter.error(
'The "%s" admonition is empty; content required.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
def attention(*args):
return admonition(nodes.attention, *args)
attention.content = 1
def caution(*args):
return admonition(nodes.caution, *args)
caution.content = 1
def danger(*args):
return admonition(nodes.danger, *args)
danger.content = 1
def error(*args):
return admonition(nodes.error, *args)
error.content = 1
def important(*args):
return admonition(nodes.important, *args)
important.content = 1
def note(*args):
return admonition(nodes.note, *args)
note.content = 1
def tip(*args):
return admonition(nodes.tip, *args)
tip.content = 1
def hint(*args):
return admonition(nodes.hint, *args)
hint.content = 1
def warning(*args):
return admonition(nodes.warning, *args)
warning.content = 1

64
docutils/parsers/rst/directives/body.py Normal file

@ -0,0 +1,64 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Directives for additional body elements.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes
def topic(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if not state_machine.match_titles:
error = state_machine.reporter.error(
'Topics may not be nested within topics or body elements.',
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
if not content:
warning = state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% name, nodes.literal_block(block_text, block_text),
line=lineno)
return [warning]
title_text = arguments[0]
textnodes, messages = state.inline_text(title_text, lineno)
title = nodes.title(title_text, '', *textnodes)
text = '\n'.join(content)
topic_node = nodes.topic(text, title, *messages)
if text:
state.nested_parse(content, content_offset, topic_node)
return [topic_node]
topic.arguments = (1, 0, 1)
topic.content = 1
def line_block(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
node_class=nodes.line_block):
if not content:
warning = state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% name, nodes.literal_block(block_text, block_text), line=lineno)
return [warning]
text = '\n'.join(content)
text_nodes, messages = state.inline_text(text, lineno)
node = node_class(text, '', *text_nodes)
return [node] + messages
line_block.content = 1
def parsed_literal(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return line_block(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine,
node_class=nodes.literal_block)
parsed_literal.content = 1

96
docutils/parsers/rst/directives/html.py Normal file

@ -0,0 +1,96 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Directives for typically HTML-specific constructs.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes, utils
from docutils.parsers.rst import states
from docutils.transforms import components
def meta(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
node = nodes.Element()
if content:
new_line_offset, blank_finish = state.nested_list_parse(
content, content_offset, node, initial_state='MetaBody',
blank_finish=1, state_machine_kwargs=metaSMkwargs)
if (new_line_offset - content_offset) != len(content):
# incomplete parse of block?
error = state_machine.reporter.error(
'Invalid meta directive.',
nodes.literal_block(block_text, block_text), line=lineno)
node += error
else:
error = state_machine.reporter.error(
'Empty meta directive.',
nodes.literal_block(block_text, block_text), line=lineno)
node += error
return node.get_children()
meta.content = 1
def imagemap(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return []
class MetaBody(states.SpecializedBody):
class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
"""HTML-specific "meta" element."""
pass
def field_marker(self, match, context, next_state):
"""Meta element."""
node, blank_finish = self.parsemeta(match)
self.parent += node
return [], next_state, []
def parsemeta(self, match):
name = self.parse_field_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
node = self.meta()
pending = nodes.pending(components.Filter,
{'component': 'writer',
'format': 'html',
'nodes': [node]})
node['content'] = ' '.join(indented)
if not indented:
line = self.state_machine.line
msg = self.reporter.info(
'No content for meta tag "%s".' % name,
nodes.literal_block(line, line),
line=self.state_machine.abs_line_number())
return msg, blank_finish
tokens = name.split()
try:
attname, val = utils.extract_name_value(tokens[0])[0]
node[attname.lower()] = val
except utils.NameValueError:
node['name'] = tokens[0]
for token in tokens[1:]:
try:
attname, val = utils.extract_name_value(token)[0]
node[attname.lower()] = val
except utils.NameValueError, detail:
line = self.state_machine.line
msg = self.reporter.error(
'Error parsing meta tag attribute "%s": %s.'
% (token, detail), nodes.literal_block(line, line),
line=self.state_machine.abs_line_number())
return msg, blank_finish
self.document.note_pending(pending)
return pending, blank_finish
metaSMkwargs = {'state_classes': (MetaBody,)}

View File

@ -0,0 +1,70 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Directives for figures and simple images.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes, utils
from docutils.parsers.rst import directives
align_values = ('top', 'middle', 'bottom', 'left', 'center', 'right')
def align(argument):
return directives.choice(argument, align_values)
def image(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
reference = ''.join(arguments[0].split('\n'))
if reference.find(' ') != -1:
error = state_machine.reporter.error(
'Image URI contains whitespace.',
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
options['uri'] = reference
image_node = nodes.image(block_text, **options)
return [image_node]
image.arguments = (1, 0, 1)
image.options = {'alt': directives.unchanged,
'height': directives.nonnegative_int,
'width': directives.nonnegative_int,
'scale': directives.nonnegative_int,
'align': align}
def figure(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
(image_node,) = image(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine)
if isinstance(image_node, nodes.system_message):
return [image_node]
figure_node = nodes.figure('', image_node)
if content:
node = nodes.Element() # anonymous container for parsing
state.nested_parse(content, content_offset, node)
first_node = node[0]
if isinstance(first_node, nodes.paragraph):
caption = nodes.caption(first_node.rawsource, '',
*first_node.children)
figure_node += caption
elif not (isinstance(first_node, nodes.comment)
and len(first_node) == 0):
error = state_machine.reporter.error(
'Figure caption must be a paragraph or empty comment.',
nodes.literal_block(block_text, block_text), line=lineno)
return [figure_node, error]
if len(node) > 1:
figure_node += nodes.legend('', *node[1:])
return [figure_node]
figure.arguments = (1, 0, 1)
figure.options = image.options
figure.content = 1
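# Illustrative sketch, not part of the original module: the option conversion
# functions above are applied to raw option text by the directive machinery.
# This assumes ``directives.choice()`` raises ValueError for an unrecognized
# value, as option converters conventionally do.
if __name__ == '__main__':
    print align('center')                   # -> 'center'
    print directives.nonnegative_int('3')   # -> 3
    try:
        align('diagonal')
    except ValueError, detail:
        print 'rejected:', detail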

View File

@ -0,0 +1,169 @@
# Authors: David Goodger, Dethe Elza
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""Miscellaneous directives."""
__docformat__ = 'reStructuredText'
import sys
import os.path
from urllib2 import urlopen, URLError
from docutils import nodes, statemachine, utils
from docutils.parsers.rst import directives, states
def include(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Include a reST file as part of the content of this reST file."""
source_dir = os.path.dirname(
os.path.abspath(state.document.current_source))
path = ''.join(arguments[0].splitlines())
if path.find(' ') != -1:
error = state_machine.reporter.error(
'"%s" directive path contains whitespace.' % name,
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
path = os.path.normpath(os.path.join(source_dir, path))
path = utils.relative_path(None, path)
try:
include_file = open(path)
except IOError, error:
severe = state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.' % (name, error),
nodes.literal_block(block_text, block_text), line=lineno)
return [severe]
include_text = include_file.read()
include_file.close()
if options.has_key('literal'):
literal_block = nodes.literal_block(include_text, include_text,
source=path)
literal_block.line = 1
return literal_block
else:
include_lines = statemachine.string2lines(include_text,
convert_whitespace=1)
state_machine.insert_input(include_lines, path)
return []
include.arguments = (1, 0, 1)
include.options = {'literal': directives.flag}
def raw(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""
Pass through content unchanged.
Content is included in the output based on the type (format) argument.
Content may be included inline (content section of the directive) or
imported from a file or URL.
"""
attributes = {'format': arguments[0]}
if content:
if options.has_key('file') or options.has_key('url'):
error = state_machine.reporter.error(
'"%s" directive may not both specify an external file and '
'have content.' % name,
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
text = '\n'.join(content)
elif options.has_key('file'):
if options.has_key('url'):
error = state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously '
'specified for the "%s" directive.' % name,
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
source_dir = os.path.dirname(
os.path.abspath(state.document.current_source))
path = os.path.normpath(os.path.join(source_dir, options['file']))
path = utils.relative_path(None, path)
try:
raw_file = open(path)
except IOError, error:
severe = state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.' % (name, error),
nodes.literal_block(block_text, block_text), line=lineno)
return [severe]
text = raw_file.read()
raw_file.close()
attributes['source'] = path
elif options.has_key('url'):
try:
raw_file = urlopen(options['url'])
except (URLError, IOError, OSError), error:
severe = state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (name, options['url'], error),
nodes.literal_block(block_text, block_text), line=lineno)
return [severe]
text = raw_file.read()
raw_file.close()
attributes['source'] = options['url']
else:
error = state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
raw_node = nodes.raw('', text, **attributes)
return [raw_node]
raw.arguments = (1, 0, 1)
raw.options = {'file': directives.path,
'url': directives.path}
raw.content = 1
def replace(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if not isinstance(state, states.SubstitutionDef):
error = state_machine.reporter.error(
'Invalid context: the "%s" directive can only be used within a '
'substitution definition.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
text = '\n'.join(content)
element = nodes.Element(text)
if text:
state.nested_parse(content, content_offset, element)
if len(element) != 1 or not isinstance(element[0], nodes.paragraph):
messages = []
for node in element:
if isinstance(node, nodes.system_message):
if node.has_key('backrefs'):
del node['backrefs']
messages.append(node)
error = state_machine.reporter.error(
'Error in "%s" directive: may contain a single paragraph '
'only.' % (name), line=lineno)
messages.append(error)
return messages
else:
return element[0].children
else:
error = state_machine.reporter.error(
'The "%s" directive is empty; content required.' % (name),
line=lineno)
return [error]
replace.content = 1
def directive_test_function(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if content:
text = '\n'.join(content)
info = state_machine.reporter.info(
'Directive processed. Type="%s", arguments=%r, options=%r, '
'content:' % (name, arguments, options),
nodes.literal_block(text, text), line=lineno)
else:
info = state_machine.reporter.info(
'Directive processed. Type="%s", arguments=%r, options=%r, '
'content: None' % (name, arguments, options), line=lineno)
return [info]
directive_test_function.arguments = (0, 1, 1)
directive_test_function.options = {'option': directives.unchanged}
directive_test_function.content = 1
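# Illustrative sketch, not part of the original module: how the "include" and
# "raw" directives above resolve a relative path against the directory of the
# current source document.  The file names are hypothetical.
if __name__ == '__main__':
    source = '/home/user/project/docs/index.txt'    # hypothetical source
    source_dir = os.path.dirname(os.path.abspath(source))
    path = os.path.normpath(os.path.join(source_dir, '../data/header.txt'))
    print path   # -> /home/user/project/data/header.txt
    # Made relative to the current working directory, as done above:
    print utils.relative_path(None, path)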

View File

@ -0,0 +1,55 @@
# Authors: David Goodger, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Directives for document parts.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.transforms import parts
from docutils.parsers.rst import directives
backlinks_values = ('top', 'entry', 'none')
def backlinks(arg):
value = directives.choice(arg, backlinks_values)
if value == 'none':
return None
else:
return value
def contents(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Table of contents."""
if arguments:
title_text = arguments[0]
text_nodes, messages = state.inline_text(title_text, lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
messages = []
title = None
pending = nodes.pending(parts.Contents, {'title': title}, block_text)
pending.details.update(options)
state_machine.document.note_pending(pending)
return [pending] + messages
contents.arguments = (0, 1, 1)
contents.options = {'depth': directives.nonnegative_int,
'local': directives.flag,
'backlinks': backlinks}
def sectnum(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Automatic section numbering."""
pending = nodes.pending(parts.SectNum)
pending.details.update(options)
state_machine.document.note_pending(pending)
return [pending]
sectnum.options = {'depth': int}

View File

@ -0,0 +1,23 @@
# Authors: David Goodger, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Directives for references and targets.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.transforms import references
def target_notes(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Target footnote generation."""
pending = nodes.pending(references.TargetNotes)
state_machine.document.note_pending(pending)
nodelist = [pending]
return nodelist

View File

@ -0,0 +1 @@
*.pyc

View File

@ -0,0 +1,21 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This package contains modules for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
_languages = {}
def get_language(language_code):
if _languages.has_key(language_code):
return _languages[language_code]
module = __import__(language_code, globals(), locals())
_languages[language_code] = module
return module
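# Illustrative sketch, not part of the original package: looking up a
# directive name through one of the per-language mapping modules.  The
# explicit import form is used here because ``get_language()`` relies on a
# package-relative ``__import__``; the package path is assumed to be
# docutils.parsers.rst.languages.
if __name__ == '__main__':
    from docutils.parsers.rst.languages import en
    print en.directives['note']   # -> 'note'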

View File

@ -0,0 +1,47 @@
# Author: Engelbert Gruber
# Contact: grubert@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
German-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
'achtung': 'attention',
'vorsicht': 'caution',
'gefahr': 'danger',
'fehler': 'error',
'hinweis': 'hint',
'wichtig': 'important',
'notiz': 'note',
'tip': 'tip',
'warnung': 'warning',
'topic': 'topic', # Überbegriff ("umbrella term")
'line-block': 'line-block',
'parsed-literal': 'parsed-literal',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
'meta': 'meta',
#'imagemap': 'imagemap',
'bild': 'image',
'abbildung': 'figure',
'raw': 'raw', # unbearbeitet ("unprocessed")
'include': 'include', # einfügen, "füge ein" would be more like a command.
# einfügung would be the noun.
'replace': 'replace', # ersetzen, ersetze
'inhalt': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
'target-notes': 'target-notes',
#'footnotes': 'footnotes',
#'citations': 'citations',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
mapping."""

View File

@ -0,0 +1,46 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
English-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
'attention': 'attention',
'caution': 'caution',
'danger': 'danger',
'error': 'error',
'hint': 'hint',
'important': 'important',
'note': 'note',
'tip': 'tip',
'warning': 'warning',
'topic': 'topic',
'line-block': 'line-block',
'parsed-literal': 'parsed-literal',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
'meta': 'meta',
#'imagemap': 'imagemap',
'image': 'image',
'figure': 'figure',
'include': 'include',
'raw': 'raw',
'replace': 'replace',
'contents': 'contents',
'sectnum': 'sectnum',
'section-numbering': 'sectnum',
'target-notes': 'target-notes',
#'footnotes': 'footnotes',
#'citations': 'citations',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
mapping."""

View File

@ -0,0 +1,39 @@
# Author: Adam Chodorowski
# Contact: chodorowski@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Swedish-language mappings for language-dependent features of reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
u'observera': 'attention',
u'varning': 'caution', # NOTE: duplicate key; overridden by the 'warning' entry below.
u'fara': 'danger',
u'fel': 'error',
u'v\u00e4gledning': 'hint',
u'viktigt': 'important',
u'notera': 'note',
u'tips': 'tip',
u'varning': 'warning',
u'fr\u00e5gor': 'questions',
# NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
u'fr\u00e5gor-och-svar': 'questions',
u'vanliga-fr\u00e5gor': 'questions',
u'meta': 'meta',
# u'bildkarta': 'imagemap', # FIXME: Translation might be too literal.
u'bild': 'image',
u'figur': 'figure',
# u'r\u00e5': 'raw', # FIXME: Translation might be too literal.
u'inneh\u00e5ll': 'contents',
# u'fotnoter': 'footnotes',
# u'citeringar': 'citations',
# u'\u00e4mne': 'topic',
u'restructuredtext-test-directive': 'restructuredtext-test-directive' }
"""Swedish name to registered (in directives/__init__.py) directive name
mapping."""

File diff suppressed because it is too large

View File

@ -0,0 +1,530 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This module defines table parser classes, which parse plaintext-graphic tables
and produce a well-formed data structure suitable for building a CALS table.
:Classes:
- `GridTableParser`: Parse fully-formed tables represented with a grid.
- `SimpleTableParser`: Parse simple tables, delimited by top & bottom
borders.
:Exception class: `TableMarkupError`
:Function:
`update_dict_of_lists()`: Merge two dictionaries containing list values.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import DataError
class TableMarkupError(DataError): pass
class TableParser:
"""
Abstract superclass for the common parts of the syntax-specific parsers.
"""
head_body_separator_pat = None
"""Matches the row separator between head rows and body rows."""
def parse(self, block):
"""
Analyze the text `block` and return a table data structure.
Given a plaintext-graphic table in `block` (list of lines of text; no
whitespace padding), parse the table, construct and return the data
necessary to construct a CALS table or equivalent.
Raise `TableMarkupError` if there is any problem with the markup.
"""
self.setup(block)
self.find_head_body_sep()
self.parse_table()
structure = self.structure_from_cells()
return structure
def find_head_body_sep(self):
"""Look for a head/body row separator line; store the line index."""
for i in range(len(self.block)):
line = self.block[i]
if self.head_body_separator_pat.match(line):
if self.head_body_sep:
raise TableMarkupError(
'Multiple head/body row separators in table (at line '
'offset %s and %s); only one allowed.'
% (self.head_body_sep, i))
else:
self.head_body_sep = i
self.block[i] = line.replace('=', '-')
if self.head_body_sep == 0 or self.head_body_sep == (len(self.block)
- 1):
raise TableMarkupError('The head/body row separator may not be '
'the first or last line of the table.')
class GridTableParser(TableParser):
"""
Parse a grid table using `parse()`.
Here's an example of a grid table::
+------------------------+------------+----------+----------+
| Header row, column 1 | Header 2 | Header 3 | Header 4 |
+========================+============+==========+==========+
| body row 1, column 1 | column 2 | column 3 | column 4 |
+------------------------+------------+----------+----------+
| body row 2 | Cells may span columns. |
+------------------------+------------+---------------------+
| body row 3 | Cells may | - Table cells |
+------------------------+ span rows. | - contain |
| body row 4 | | - body elements. |
+------------------------+------------+---------------------+
Intersections use '+', row separators use '-' (except for one optional
head/body row separator, which uses '='), and column separators use '|'.
Passing the above table to the `parse()` method will result in the
following data structure::
([24, 12, 10, 10],
[[(0, 0, 1, ['Header row, column 1']),
(0, 0, 1, ['Header 2']),
(0, 0, 1, ['Header 3']),
(0, 0, 1, ['Header 4'])]],
[[(0, 0, 3, ['body row 1, column 1']),
(0, 0, 3, ['column 2']),
(0, 0, 3, ['column 3']),
(0, 0, 3, ['column 4'])],
[(0, 0, 5, ['body row 2']),
(0, 2, 5, ['Cells may span columns.']),
None,
None],
[(0, 0, 7, ['body row 3']),
(1, 0, 7, ['Cells may', 'span rows.', '']),
(1, 1, 7, ['- Table cells', '- contain', '- body elements.']),
None],
[(0, 0, 9, ['body row 4']), None, None, None]])
The first item is a list containing column widths (colspecs). The second
item is a list of head rows, and the third is a list of body rows. Each
row contains a list of cells. Each cell is either None (for a cell unused
because of another cell's span), or a tuple. A cell tuple contains four
items: the number of extra rows used by the cell in a vertical span
(morerows); the number of extra columns used by the cell in a horizontal
span (morecols); the line offset of the first line of the cell contents;
and the cell contents, a list of lines of text.
"""
head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
def setup(self, block):
self.block = list(block) # make a copy; it may be modified
self.bottom = len(block) - 1
self.right = len(block[0]) - 1
self.head_body_sep = None
self.done = [-1] * len(block[0])
self.cells = []
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
Start with a queue of upper-left corners, containing the upper-left
corner of the table itself. Trace out one rectangular cell, remember
it, and add its upper-right and lower-left corners to the queue of
potential upper-left corners of further cells. Process the queue in
top-to-bottom order, keeping track of how much of each text column has
been seen.
We'll end up knowing all the row and column boundaries, cell positions
and their dimensions.
"""
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
if top == self.bottom or left == self.right \
or top <= self.done[left]:
continue
result = self.scan_cell(top, left)
if not result:
continue
bottom, right, rowseps, colseps = result
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
cellblock = self.get_cell_block(top, left, bottom, right)
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
if not self.check_parse_complete():
raise TableMarkupError('Malformed table; parse incomplete.')
def mark_done(self, top, left, bottom, right):
"""For keeping track of how much of each text column has been seen."""
before = top - 1
after = bottom - 1
for col in range(left, right):
assert self.done[col] == before
self.done[col] = after
def check_parse_complete(self):
"""Each text column should have been completely seen."""
last = self.bottom - 1
for col in range(self.right):
if self.done[col] != last:
return None
return 1
def get_cell_block(self, top, left, bottom, right):
"""Given the corners, extract the text of a cell."""
cellblock = []
margin = right
for lineno in range(top + 1, bottom):
line = self.block[lineno][left + 1 : right].rstrip()
cellblock.append(line)
if line:
margin = min(margin, len(line) - len(line.lstrip()))
if 0 < margin < right:
cellblock = [line[margin:] for line in cellblock]
return cellblock
def scan_cell(self, top, left):
"""Starting at the top-left corner, start tracing out a cell."""
assert self.block[top][left] == '+'
result = self.scan_right(top, left)
return result
def scan_right(self, top, left):
"""
Look for the top-right corner of the cell, and make note of all column
boundaries ('+').
"""
colseps = {}
line = self.block[top]
for i in range(left + 1, self.right + 1):
if line[i] == '+':
colseps[i] = [top]
result = self.scan_down(top, left, i)
if result:
bottom, rowseps, newcolseps = result
update_dict_of_lists(colseps, newcolseps)
return bottom, i, rowseps, colseps
elif line[i] != '-':
return None
return None
def scan_down(self, top, left, right):
"""
Look for the bottom-right corner of the cell, making note of all row
boundaries.
"""
rowseps = {}
for i in range(top + 1, self.bottom + 1):
if self.block[i][right] == '+':
rowseps[i] = [right]
result = self.scan_left(top, left, i, right)
if result:
newrowseps, colseps = result
update_dict_of_lists(rowseps, newrowseps)
return i, rowseps, colseps
elif self.block[i][right] != '|':
return None
return None
def scan_left(self, top, left, bottom, right):
"""
Noting column boundaries, look for the bottom-left corner of the cell.
It must line up with the starting point.
"""
colseps = {}
line = self.block[bottom]
for i in range(right - 1, left, -1):
if line[i] == '+':
colseps[i] = [bottom]
elif line[i] != '-':
return None
if line[left] != '+':
return None
result = self.scan_up(top, left, bottom, right)
if result is not None:
rowseps = result
return rowseps, colseps
return None
def scan_up(self, top, left, bottom, right):
"""
Noting row boundaries, see if we can return to the starting point.
"""
rowseps = {}
for i in range(bottom - 1, top, -1):
if self.block[i][left] == '+':
rowseps[i] = [left]
elif self.block[i][left] != '|':
return None
return rowseps
def structure_from_cells(self):
"""
From the data collected by `scan_cell()`, convert to the final data
structure.
"""
rowseps = self.rowseps.keys() # list of row boundaries
rowseps.sort()
rowindex = {}
for i in range(len(rowseps)):
rowindex[rowseps[i]] = i # row boundary -> row number mapping
colseps = self.colseps.keys() # list of column boundaries
colseps.sort()
colindex = {}
for i in range(len(colseps)):
colindex[colseps[i]] = i # column boundary -> col number map
colspecs = [(colseps[i] - colseps[i - 1] - 1)
for i in range(1, len(colseps))] # list of column widths
# prepare an empty table with the correct number of rows & columns
onerow = [None for i in range(len(colseps) - 1)]
rows = [onerow[:] for i in range(len(rowseps) - 1)]
# keep track of # of cells remaining; should reduce to zero
remaining = (len(rowseps) - 1) * (len(colseps) - 1)
for top, left, bottom, right, block in self.cells:
rownum = rowindex[top]
colnum = colindex[left]
assert rows[rownum][colnum] is None, (
'Cell (row %s, column %s) already used.'
% (rownum + 1, colnum + 1))
morerows = rowindex[bottom] - rownum - 1
morecols = colindex[right] - colnum - 1
remaining -= (morerows + 1) * (morecols + 1)
# write the cell into the table
rows[rownum][colnum] = (morerows, morecols, top + 1, block)
assert remaining == 0, 'Unused cells remaining.'
if self.head_body_sep: # separate head rows from body rows
numheadrows = rowindex[self.head_body_sep]
headrows = rows[:numheadrows]
bodyrows = rows[numheadrows:]
else:
headrows = []
bodyrows = rows
return (colspecs, headrows, bodyrows)
class SimpleTableParser(TableParser):
"""
Parse a simple table using `parse()`.
Here's an example of a simple table::
===== =====
col 1 col 2
===== =====
1 Second column of row 1.
2 Second column of row 2.
Second line of paragraph.
3 - Second column of row 3.
- Second item in bullet
list (row 3, column 2).
4 is a span
------------
5
===== =====
Top and bottom borders use '=', column span underlines use '-', column
separation is indicated with spaces.
Passing the above table to the `parse()` method will result in the
following data structure, whose interpretation is the same as for
`GridTableParser`::
([5, 25],
[[(0, 0, 1, ['col 1']),
(0, 0, 1, ['col 2'])]],
[[(0, 0, 3, ['1']),
(0, 0, 3, ['Second column of row 1.'])],
[(0, 0, 4, ['2']),
(0, 0, 4, ['Second column of row 2.',
'Second line of paragraph.'])],
[(0, 0, 6, ['3']),
(0, 0, 6, ['- Second column of row 3.',
'',
'- Second item in bullet',
' list (row 3, column 2).'])],
[(0, 1, 10, ['4 is a span'])],
[(0, 0, 12, ['5']),
(0, 0, 12, [''])]])
"""
head_body_separator_pat = re.compile('=[ =]*$')
span_pat = re.compile('-[ -]*$')
def setup(self, block):
self.block = list(block) # make a copy; it will be modified
# Convert top & bottom borders to column span underlines:
self.block[0] = self.block[0].replace('=', '-')
self.block[-1] = self.block[-1].replace('=', '-')
self.head_body_sep = None
self.columns = []
self.border_end = None
self.table = []
self.done = [-1] * len(block[0])
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
First determine the column boundaries from the top border, then
process rows. Each row may consist of multiple lines; accumulate
lines until a row is complete. Call `self.parse_row` to finish the
job.
"""
# Top border must fully describe all table columns.
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
block = self.block[1:]
offset = 0
# Container for accumulating text lines until a row is complete:
rowlines = []
while block:
line = block.pop(0)
offset += 1
if self.span_pat.match(line):
# Column span underline or border; row is complete.
self.parse_row(rowlines, (line.rstrip(), offset))
rowlines = []
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
if rowlines:
self.parse_row(rowlines)
rowlines = [(line.rstrip(), offset)]
else:
# Accumulate lines of incomplete row.
rowlines.append((line.rstrip(), offset))
def parse_columns(self, line, offset):
"""
Given a column span underline, return a list of (begin, end) pairs.
"""
cols = []
end = 0
while 1:
begin = line.find('-', end)
end = line.find(' ', begin)
if begin < 0:
break
if end < 0:
end = len(line)
cols.append((begin, end))
if self.columns:
if cols[-1][1] != self.border_end:
raise TableMarkupError('Column span incomplete at line '
'offset %s.' % offset)
# Allow for an unbounded rightmost column:
cols[-1] = (cols[-1][0], self.columns[-1][1])
return cols
def init_row(self, colspec, offset):
i = 0
cells = []
for start, end in colspec:
morecols = 0
try:
assert start == self.columns[i][0]
while end != self.columns[i][1]:
i += 1
morecols += 1
except (AssertionError, IndexError):
raise TableMarkupError('Column span alignment problem at '
'line offset %s.' % offset)
cells.append((0, morecols, offset, []))
i += 1
return cells
def parse_row(self, lines, spanline=None):
"""
Given the text `lines` of a row, parse it and append to `self.table`.
The row is parsed according to the current column spec (either
`spanline` if provided or `self.columns`). For each column, extract
text from each line, and check for text in column margins. Finally,
adjust for insignificant whitespace.
"""
while lines and not lines[-1][0]:
lines.pop() # Remove blank trailing lines.
if lines:
offset = lines[0][1]
elif spanline:
offset = spanline[1]
else:
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
else:
columns = self.columns[:]
row = self.init_row(columns, offset)
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxint, None))
lastcol = len(columns) - 2
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
block = []
margin = sys.maxint
for line, offset in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
columns[lastcol] = (start, start + len(text))
self.adjust_last_column(start + len(text))
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin at line '
'offset %s.' % offset)
else:
text = line[start:end].rstrip()
block.append(text)
if text:
margin = min(margin, len(text) - len(text.lstrip()))
if 0 < margin < sys.maxint:
block = [line[margin:] for line in block]
row[i][3].extend(block)
self.table.append(row)
def adjust_last_column(self, new_end):
start, end = self.columns[-1]
if new_end > end:
self.columns[-1] = (start, new_end)
def structure_from_cells(self):
colspecs = [end - start for start, end in self.columns]
first_body_row = 0
if self.head_body_sep:
for i in range(len(self.table)):
if self.table[i][0][2] > self.head_body_sep:
first_body_row = i
break
return (colspecs, self.table[:first_body_row],
self.table[first_body_row:])
def update_dict_of_lists(master, newdata):
"""
Extend the list values of `master` with those from `newdata`.
Both parameters must be dictionaries containing list values.
"""
for key, values in newdata.items():
master.setdefault(key, []).extend(values)
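# Illustrative usage sketch, not part of the original module: a minimal 2x2
# grid table, given as a list of equal-width lines, run through
# ``GridTableParser.parse()``.  The result follows the (colspecs, headrows,
# bodyrows) structure documented in the class docstring above.
if __name__ == '__main__':
    sample = ['+-----+-----+',
              '| A   | B   |',
              '+=====+=====+',
              '| 1   | 2   |',
              '+-----+-----+']
    colspecs, headrows, bodyrows = GridTableParser().parse(sample)
    print colspecs   # -> [5, 5]
    print headrows   # -> [[(0, 0, 1, ['A']), (0, 0, 1, ['B'])]]
    print bodyrows   # -> [[(0, 0, 3, ['1']), (0, 0, 3, ['2'])]]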

View File

@ -0,0 +1 @@
*.pyc

View File

@ -0,0 +1,89 @@
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Reader modules.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import utils, parsers, Component
from docutils.transforms import universal
class Reader(Component):
"""
Abstract base class for docutils Readers.
Each reader module or package must export a subclass also called 'Reader'.
The three steps of a Reader's responsibility are defined: `scan()`,
`parse()`, and `transform()`. Call `read()` to process a document.
"""
component_type = 'reader'
def __init__(self, parser=None, parser_name='restructuredtext'):
"""
Initialize the Reader instance.
Several instance attributes are defined with dummy initial values.
Subclasses may use these attributes as they wish.
"""
self.parser = parser
"""A `parsers.Parser` instance shared by all doctrees. May be left
unspecified if the document source determines the parser."""
if parser is None and parser_name:
self.set_parser(parser_name)
self.source = None
"""`docutils.io` IO object, source of input data."""
self.input = None
"""Raw text input; either a single string or, for more complex cases,
a collection of strings."""
def set_parser(self, parser_name):
"""Set `self.parser` by name."""
parser_class = parsers.get_parser_class(parser_name)
self.parser = parser_class()
def read(self, source, parser, settings):
self.source = source
if not self.parser:
self.parser = parser
self.settings = settings
# May modify self.parser, depending on input:
self.input = self.source.read(self)
self.parse()
return self.document
def parse(self):
"""Parse `self.input` into a document tree."""
self.document = document = self.new_document()
self.parser.parse(self.input, document)
document.current_source = document.current_line = None
def new_document(self):
"""Create and return a new empty document tree (root node)."""
document = utils.new_document(self.source.source_path, self.settings)
return document
_reader_aliases = {}
def get_reader_class(reader_name):
"""Return the Reader class from the `reader_name` module."""
reader_name = reader_name.lower()
if _reader_aliases.has_key(reader_name):
reader_name = _reader_aliases[reader_name]
module = __import__(reader_name, globals(), locals())
return module.Reader
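# Illustrative sketch, not part of the original package, assuming the
# docutils package is importable; `Reader.__init__()` falls back to the
# reStructuredText parser when none is given:
#
#     >>> reader_class = get_reader_class('standalone')
#     >>> reader = reader_class()
#     >>> reader.parser
#     <docutils.parsers.rst.Parser instance at ...>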

60
docutils/readers/pep.py Normal file
View File

@ -0,0 +1,60 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
from docutils import nodes
from docutils.readers import standalone
from docutils.transforms import peps, references
from docutils.parsers import rst
class Reader(standalone.Reader):
supported = ('pep',)
"""Contexts this reader supports."""
settings_spec = (
'PEP Reader Option Defaults',
'The --pep-references and --rfc-references options (for the '
'reStructuredText parser) are on by default.',
())
default_transforms = (references.Substitutions,
peps.Headers,
peps.Contents,
references.ChainedTargets,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
peps.TargetNotes,
references.Footnotes,
references.ExternalTargets,
references.InternalTargets,)
settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
def __init__(self, parser=None, parser_name=None):
"""`parser` should be ``None``."""
if parser is None:
parser = rst.Parser(rfc2822=1, inliner=Inliner())
standalone.Reader.__init__(self, parser, '')
class Inliner(rst.states.Inliner):
"""
Extend `rst.Inliner` for local PEP references.
"""
pep_url = rst.states.Inliner.pep_url_local

View File

@ -0,0 +1,36 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Standalone file Reader for the reStructuredText markup syntax.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import readers
from docutils.transforms import frontmatter, references
from docutils.parsers.rst import Parser
class Reader(readers.Reader):
supported = ('standalone',)
"""Contexts this reader supports."""
document = None
"""A single document tree."""
default_transforms = (references.Substitutions,
frontmatter.DocTitle,
frontmatter.DocInfo,
references.ChainedTargets,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
references.Footnotes,
references.ExternalTargets,
references.InternalTargets,)

81
docutils/roman.py Normal file
View File

@ -0,0 +1,81 @@
"""Convert to and from Roman numerals"""
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = """Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
"""
import re
#Define exceptions
class RomanError(Exception): pass
class OutOfRangeError(RomanError): pass
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass
#Define digit mapping
romanNumeralMap = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def toRoman(n):
"""convert integer to Roman numeral"""
if not (0 < n < 5000):
raise OutOfRangeError, "number out of range (must be 1..4999)"
if int(n) <> n:
raise NotIntegerError, "decimals can not be converted"
result = ""
for numeral, integer in romanNumeralMap:
while n >= integer:
result += numeral
n -= integer
return result
#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile('''
^ # beginning of string
M{0,4} # thousands - 0 to 4 M's
(CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
# or 500-800 (D, followed by 0 to 3 C's)
(XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
# or 50-80 (L, followed by 0 to 3 X's)
(IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
# or 5-8 (V, followed by 0 to 3 I's)
$ # end of string
''' ,re.VERBOSE)
def fromRoman(s):
"""convert Roman numeral to integer"""
if not s:
raise InvalidRomanNumeralError, 'Input can not be blank'
if not romanNumeralPattern.search(s):
raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s
result = 0
index = 0
for numeral, integer in romanNumeralMap:
while s[index:index+len(numeral)] == numeral:
result += integer
index += len(numeral)
return result
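# Illustrative usage sketch (not part of the original module):
if __name__ == '__main__':
    print toRoman(1998)             # -> 'MCMXCVIII'
    print fromRoman('MCMXCVIII')    # -> 1998
    try:
        toRoman(5000)
    except OutOfRangeError, detail:
        print 'out of range:', detail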

1444
docutils/statemachine.py Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1 @@
*.pyc

View File

@ -0,0 +1,166 @@
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This package contains modules for standard tree transforms available
to Docutils components. Tree transforms serve a variety of purposes:
- To tie up certain syntax-specific "loose ends" that remain after the
initial parsing of the input plaintext. These transforms are used to
supplement a limited syntax.
- To automate the internal linking of the document tree (hyperlink
references, footnote references, etc.).
- To extract useful information from the document tree. These
transforms may be used to construct (for example) indexes and tables
of contents.
Each transform is an optional step that a Docutils Reader may choose to
perform on the parsed document, depending on the input context. A Docutils
Reader may also perform Reader-specific transforms before or after performing
these standard transforms.
"""
__docformat__ = 'reStructuredText'
from docutils import languages, ApplicationError, TransformSpec
class TransformError(ApplicationError): pass
class Transform:
"""
Docutils transform component abstract base class.
"""
default_priority = None
"""Numerical priority of this transform, 0 through 999 (override)."""
def __init__(self, document, startnode=None):
"""
Initial setup for in-place document transforms.
"""
self.document = document
"""The document tree to transform."""
self.startnode = startnode
"""Node from which to begin the transform. For many transforms which
apply to the document as a whole, `startnode` is not set (i.e. its
value is `None`)."""
self.language = languages.get_language(
document.settings.language_code)
"""Language module local to this document."""
def apply(self):
"""Override to apply the transform to the document tree."""
raise NotImplementedError('subclass must override this method')
class Transformer(TransformSpec):
"""
Stores transforms (`Transform` classes) and applies them to document
trees. Also keeps track of components by component type name.
"""
from docutils.transforms import universal
default_transforms = (universal.Decorations,
universal.FinalChecks,
universal.Messages)
"""These transforms are applied to all document trees."""
def __init__(self, document):
self.transforms = []
"""List of transforms to apply. Each item is a 3-tuple:
``(priority string, transform class, pending node or None)``."""
self.document = document
"""The `nodes.document` object this Transformer is attached to."""
self.applied = []
"""Transforms already applied, in order."""
self.sorted = 0
"""Boolean: is `self.tranforms` sorted?"""
self.components = {}
"""Mapping of component type name to component object. Set by
`self.populate_from_components()`."""
self.serialno = 0
"""Internal serial number to keep track of the add order of
transforms."""
def add_transform(self, transform_class, priority=None):
"""
Store a single transform. Use `priority` to override the default.
"""
if priority is None:
priority = transform_class.default_priority
priority_string = self.get_priority_string(priority)
self.transforms.append((priority_string, transform_class, None))
self.sorted = 0
def add_transforms(self, transform_list):
"""Store multiple transforms, with default priorities."""
for transform_class in transform_list:
priority_string = self.get_priority_string(
transform_class.default_priority)
self.transforms.append((priority_string, transform_class, None))
self.sorted = 0
def add_pending(self, pending, priority=None):
"""Store a transform with an associated `pending` node."""
transform_class = pending.transform
if priority is None:
priority = transform_class.default_priority
priority_string = self.get_priority_string(priority)
self.transforms.append((priority_string, transform_class, pending))
self.sorted = 0
def get_priority_string(self, priority):
"""
Return a string, `priority` combined with `self.serialno`.
This ensures FIFO order on transforms with identical priority.
"""
self.serialno += 1
return '%03d-%03d' % (priority, self.serialno)
def populate_from_components(self, components):
"""
Store each component's default transforms, with default priorities.
Also, store components by type name in a mapping for later lookup.
"""
self.add_transforms(self.default_transforms)
for component in components:
if component is None:
continue
self.add_transforms(component.default_transforms)
self.components[component.component_type] = component
self.sorted = 0
def apply_transforms(self):
"""Apply all of the stored transforms, in priority order."""
self.document.reporter.attach_observer(
self.document.note_transform_message)
while self.transforms:
if not self.sorted:
# Unsorted initially, and whenever a transform is added.
self.transforms.sort()
self.transforms.reverse()
self.sorted = 1
priority, transform_class, pending = self.transforms.pop()
transform = transform_class(self.document, startnode=pending)
transform.apply()
self.applied.append((priority, transform_class, pending))
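# Illustrative sketch, not part of the original package: the minimal shape of
# a concrete transform.  The class name and priority are hypothetical.
class ExampleNoOpTransform(Transform):

    """Do nothing; shows the required `default_priority` and `apply()`."""

    default_priority = 500

    def apply(self):
        # A real transform would modify self.document in place here.
        pass

# It would be registered with, e.g.,
# ``document.transformer.add_transform(ExampleNoOpTransform)``.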

View File

@ -0,0 +1,54 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Docutils component-related transforms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
class Filter(Transform):
"""
Include or exclude elements which depend on a specific Docutils component.
For use with `nodes.pending` elements. A "pending" element's dictionary
attribute ``details`` must contain the keys "component" and "format". The
value of ``details['component']`` must match the type name of the
component the elements depend on (e.g. "writer"). The value of
``details['format']`` is the name of a specific format or context of that
component (e.g. "html"). If the matching Docutils component supports that
format or context, the "pending" element is replaced by the contents of
``details['nodes']`` (a list of nodes); otherwise, the "pending" element
is removed.
For example, the reStructuredText "meta" directive creates a "pending"
element containing a "meta" element (in ``pending.details['nodes']``).
Only writers (``pending.details['component'] == 'writer'``) supporting the
"html" format (``pending.details['format'] == 'html'``) will include the
"meta" element; it will be deleted from the output of all other writers.
"""
default_priority = 780
def apply(self):
pending = self.startnode
component_type = pending.details['component'] # 'reader' or 'writer'
format = pending.details['format']
component = self.document.transformer.components[component_type]
if component.supports(format):
pending.parent.replace(pending, pending.details['nodes'])
else:
pending.parent.remove(pending)

View File

@ -0,0 +1,380 @@
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Transforms related to the front matter of a document (information
found before the main text):
- `DocTitle`: Used to transform a lone top-level section's title to
the document title, and promote a remaining lone top-level section's
title to the document subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__ = 'reStructuredText'
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class DocTitle(Transform):
"""
In reStructuredText_, there is no way to specify a document title
and subtitle explicitly. Instead, we can supply the document title
(and possibly the subtitle as well) implicitly, and use this
two-step transform to "raise" or "promote" the title(s) (and their
corresponding section contents) to the document level.
1. If the document contains a single top-level section as its
first non-comment element, the top-level section's title
becomes the document's title, and the top-level section's
contents become the document's immediate contents. The lone
top-level section header must be the first non-comment element
in the document.
For example, take this input text::
=================
Top-Level Title
=================
A paragraph.
Once parsed, it looks like this::
<document>
<section name="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
After running the DocTitle transform, we have::
<document name="top-level title">
<title>
Top-Level Title
<paragraph>
A paragraph.
2. If step 1 successfully determines the document title, we
continue by checking for a subtitle.
If the lone top-level section itself contains a single
second-level section as its first non-comment element, that
section's title is promoted to the document's subtitle, and
that section's contents become the document's immediate
contents. Given this input text::
=================
Top-Level Title
=================
Second-Level Title
~~~~~~~~~~~~~~~~~~
A paragraph.
After parsing and running the Section Promotion transform, the
result is::
<document name="top-level title">
<title>
Top-Level Title
<subtitle name="second-level title">
Second-Level Title
<paragraph>
A paragraph.
(Note that the implicit hyperlink target generated by the
"Second-Level Title" is preserved on the "subtitle" element
itself.)
Any comment elements occurring before the document title or
subtitle are accumulated and inserted as the first body elements
after the title(s).
"""
default_priority = 320
def apply(self):
if self.promote_document_title():
self.promote_document_subtitle()
def promote_document_title(self):
section, index = self.candidate_index()
if index is None:
return None
document = self.document
# Transfer the section's attributes to the document element (at root):
document.attributes.update(section.attributes)
document[:] = (section[:1] # section title
+ document[:index] # everything that was in the
# document before the section
+ section[1:]) # everything that was in the section
return 1
def promote_document_subtitle(self):
subsection, index = self.candidate_index()
if index is None:
return None
subtitle = nodes.subtitle()
# Transfer the subsection's attributes to the new subtitle:
subtitle.attributes.update(subsection.attributes)
# Transfer the contents of the subsection's title to the subtitle:
subtitle[:] = subsection[0][:]
document = self.document
document[:] = (document[:1] # document title
+ [subtitle]
# everything that was before the section:
+ document[1:index]
# everything that was in the subsection:
+ subsection[1:])
return 1
def candidate_index(self):
"""
Find and return the promotion candidate and its index.
Return (None, None) if no valid candidate was found.
"""
document = self.document
index = document.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None or len(document) > (index + 1) or \
not isinstance(document[index], nodes.section):
return None, None
else:
return document[index], index
class DocInfo(Transform):
"""
This transform is specific to the reStructuredText_ markup syntax;
see "Bibliographic Fields" in the `reStructuredText Markup
Specification`_ for a high-level description. This transform
should be run *after* the `DocTitle` transform.
Given a field list as the first non-comment element after the
document title and subtitle (if present), registered bibliographic
field names are transformed to the corresponding DTD elements,
becoming child elements of the "docinfo" element (except for a
dedication and/or an abstract, which become "topic" elements after
"docinfo").
For example, given this document fragment after parsing::
<document>
<title>
Document Title
<field_list>
<field>
<field_name>
Author
<field_body>
<paragraph>
A. Name
<field>
<field_name>
Status
<field_body>
<paragraph>
$RCSfile$
...
After running the bibliographic field list transform, the
resulting document tree would look like this::
<document>
<title>
Document Title
<docinfo>
<author>
A. Name
<status>
frontmatter.py
...
The "Status" field contained an expanded RCS keyword, which is
normally (but optionally) cleaned up by the transform. The sole
contents of the field body must be a paragraph containing an
expanded RCS keyword of the form "$keyword: expansion text $". Any
RCS keyword can be processed in any bibliographic field. The
dollar signs and leading RCS keyword name are removed. Extra
processing is done for the following RCS keywords:
- "RCSfile" expands to the name of the file in the RCS or CVS
repository, which is the name of the source file with a ",v"
suffix appended. The transform will remove the ",v" suffix.
- "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
time zone). The RCS Keywords transform will extract just the
date itself and transform it to an ISO 8601 format date, as in
"2000-12-31".
(Since the source file for this text is itself stored under CVS,
we can't show an example of the "Date" RCS keyword because we
can't prevent any RCS keywords used in this explanation from
being expanded. Only the "RCSfile" keyword is stable; its
expansion text changes only if the file name changes.)
"""
default_priority = 340
def apply(self):
document = self.document
index = document.first_child_not_matching_class(
nodes.PreBibliographic)
if index is None:
return
candidate = document[index]
if isinstance(candidate, nodes.field_list):
biblioindex = document.first_child_not_matching_class(
nodes.Titular)
nodelist = self.extract_bibliographic(candidate)
del document[index] # untransformed field list (candidate)
document[biblioindex:biblioindex] = nodelist
return
def extract_bibliographic(self, field_list):
docinfo = nodes.docinfo()
bibliofields = self.language.bibliographic_fields
labels = self.language.labels
topics = {'dedication': None, 'abstract': None}
for field in field_list:
try:
name = field[0][0].astext()
normedname = utils.normalize_name(name)
if not (len(field) == 2 and bibliofields.has_key(normedname)
and self.check_empty_biblio_field(field, name)):
raise TransformError
biblioclass = bibliofields[normedname]
if issubclass(biblioclass, nodes.TextElement):
if not self.check_compound_biblio_field(field, name):
raise TransformError
utils.clean_rcs_keywords(
field[1][0], self.rcs_keyword_substitutions)
docinfo.append(biblioclass('', '', *field[1][0]))
else: # multiple body elements possible
if issubclass(biblioclass, nodes.authors):
self.extract_authors(field, name, docinfo)
elif issubclass(biblioclass, nodes.topic):
if topics[normedname]:
field[-1] += self.document.reporter.warning(
'There can only be one "%s" field.' % name,
base_node=field)
raise TransformError
title = nodes.title(name, labels[normedname])
topics[normedname] = biblioclass(
'', title, CLASS=normedname, *field[1].children)
else:
docinfo.append(biblioclass('', *field[1].children))
except TransformError:
if len(field[-1]) == 1 \
and isinstance(field[-1][0], nodes.paragraph):
utils.clean_rcs_keywords(
field[-1][0], self.rcs_keyword_substitutions)
docinfo.append(field)
continue
nodelist = []
if len(docinfo) != 0:
nodelist.append(docinfo)
for name in ('dedication', 'abstract'):
if topics[name]:
nodelist.append(topics[name])
return nodelist
def check_empty_biblio_field(self, field, name):
if len(field[-1]) < 1:
field[-1] += self.document.reporter.warning(
'Cannot extract empty bibliographic field "%s".' % name,
base_node=field)
return None
return 1
def check_compound_biblio_field(self, field, name):
if len(field[-1]) > 1:
field[-1] += self.document.reporter.warning(
'Cannot extract compound bibliographic field "%s".' % name,
base_node=field)
return None
if not isinstance(field[-1][0], nodes.paragraph):
field[-1] += self.document.reporter.warning(
'Cannot extract bibliographic field "%s" containing '
'anything other than a single paragraph.' % name,
base_node=field)
return None
return 1
rcs_keyword_substitutions = [
(re.compile(r'\$' r'Date: (\d\d\d\d)/(\d\d)/(\d\d) [\d:]+ \$$',
re.IGNORECASE), r'\1-\2-\3'),
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),]
def extract_authors(self, field, name, docinfo):
try:
if len(field[1]) == 1:
if isinstance(field[1][0], nodes.paragraph):
authors = self.authors_from_one_paragraph(field)
elif isinstance(field[1][0], nodes.bullet_list):
authors = self.authors_from_bullet_list(field)
else:
raise TransformError
else:
authors = self.authors_from_paragraphs(field)
authornodes = [nodes.author('', '', *author)
for author in authors if author]
if len(authornodes) > 1:
docinfo.append(nodes.authors('', *authornodes))
elif len(authornodes) == 1:
docinfo.append(authornodes[0])
else:
raise TransformError
except TransformError:
field[-1] += self.document.reporter.warning(
'Bibliographic field "%s" incompatible with extraction: '
'it must contain either a single paragraph (with authors '
'separated by one of "%s"), multiple paragraphs (one per '
'author), or a bullet list with one paragraph (one author) '
'per item.'
% (name, ''.join(self.language.author_separators)),
base_node=field)
raise
def authors_from_one_paragraph(self, field):
text = field[1][0].astext().strip()
if not text:
raise TransformError
for authorsep in self.language.author_separators:
authornames = text.split(authorsep)
if len(authornames) > 1:
break
authornames = [author.strip() for author in authornames]
authors = [[nodes.Text(author)] for author in authornames if author]
return authors
def authors_from_bullet_list(self, field):
authors = []
for item in field[1][0]:
if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
raise TransformError
authors.append(item[0].children)
if not authors:
raise TransformError
return authors
def authors_from_paragraphs(self, field):
for item in field[1]:
if not isinstance(item, nodes.paragraph):
raise TransformError
authors = [item.children for item in field[1]]
return authors
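# Illustrative sketch, not part of the original module: the RCS keyword
# substitution patterns defined on `DocInfo`, applied to sample expanded
# keywords.  (The keyword strings are split so that CVS does not expand them
# here.)
if __name__ == '__main__':
    samples = ['$' 'RCSfile: frontmatter.py,v $',
               '$' 'Date: 2000/12/31 23:59:59 $']
    for text in samples:
        for pattern, substitution in DocInfo.rcs_keyword_substitutions:
            text = pattern.sub(substitution, text)
        print text   # -> 'frontmatter.py', then '2000-12-31'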

View File

@ -0,0 +1,173 @@
# Authors: David Goodger, Ueli Schlaepfer, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Transforms related to document parts.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class SectNum(Transform):
"""
Automatically assigns numbers to the titles of document sections.
It is possible to limit the maximum section level for which the numbers
    are added.  For those sections that are auto-numbered, the "auto"
attribute is set, informing the contents table generator that a different
form of the TOC should be used.
"""
default_priority = 710
"""Should be applied before `Contents`."""
def apply(self):
self.maxdepth = self.startnode.details.get('depth', sys.maxint)
self.startnode.parent.remove(self.startnode)
self.update_section_numbers(self.document)
def update_section_numbers(self, node, prefix=(), depth=0):
depth += 1
sectnum = 1
for child in node:
if isinstance(child, nodes.section):
numbers = prefix + (str(sectnum),)
title = child[0]
# Use &nbsp; for spacing:
generated = nodes.generated(
'', '.'.join(numbers) + u'\u00a0' * 3, CLASS='sectnum')
title.insert(0, generated)
title['auto'] = 1
if depth < self.maxdepth:
self.update_section_numbers(child, numbers, depth)
sectnum += 1
class Contents(Transform):
"""
This transform generates a table of contents from the entire document tree
or from a single branch. It locates "section" elements and builds them
into a nested bullet list, which is placed within a "topic". A title is
either explicitly specified, taken from the appropriate language module,
or omitted (local table of contents). The depth may be specified.
Two-way references between the table of contents and section titles are
generated (requires Writer support).
    This transform requires a startnode, which contains generation
options and provides the location for the generated table of contents (the
startnode is replaced by the table of contents "topic").
"""
default_priority = 720
def apply(self):
topic = nodes.topic(CLASS='contents')
title = self.startnode.details['title']
if self.startnode.details.has_key('local'):
startnode = self.startnode.parent
# @@@ generate an error if the startnode (directive) not at
# section/document top-level? Drag it up until it is?
while not isinstance(startnode, nodes.Structural):
startnode = startnode.parent
else:
startnode = self.document
if not title:
title = nodes.title('', self.language.labels['contents'])
if title:
name = title.astext()
topic += title
else:
name = self.language.labels['contents']
name = utils.normalize_name(name)
if not self.document.has_name(name):
topic['name'] = name
self.document.note_implicit_target(topic)
self.toc_id = topic['id']
if self.startnode.details.has_key('backlinks'):
self.backlinks = self.startnode.details['backlinks']
else:
self.backlinks = self.document.settings.toc_backlinks
contents = self.build_contents(startnode)
if len(contents):
topic += contents
self.startnode.parent.replace(self.startnode, topic)
else:
self.startnode.parent.remove(self.startnode)
def build_contents(self, node, level=0):
level += 1
sections = []
i = len(node) - 1
while i >= 0 and isinstance(node[i], nodes.section):
sections.append(node[i])
i -= 1
sections.reverse()
entries = []
autonum = 0
depth = self.startnode.details.get('depth', sys.maxint)
for section in sections:
title = section[0]
auto = title.get('auto') # May be set by SectNum.
entrytext = self.copy_and_filter(title)
reference = nodes.reference('', '', refid=section['id'],
*entrytext)
ref_id = self.document.set_id(reference)
entry = nodes.paragraph('', '', reference)
item = nodes.list_item('', entry)
if self.backlinks == 'entry':
title['refid'] = ref_id
elif self.backlinks == 'top':
title['refid'] = self.toc_id
if level < depth:
subsects = self.build_contents(section, level)
item += subsects
entries.append(item)
if entries:
contents = nodes.bullet_list('', *entries)
if auto:
contents.set_class('auto-toc')
return contents
else:
return []
def copy_and_filter(self, node):
"""Return a copy of a title, with references, images, etc. removed."""
visitor = ContentsFilter(self.document)
node.walkabout(visitor)
return visitor.get_entry_text()
class ContentsFilter(nodes.TreeCopyVisitor):
def get_entry_text(self):
return self.get_tree_copy().get_children()
def visit_citation_reference(self, node):
raise nodes.SkipNode
def visit_footnote_reference(self, node):
raise nodes.SkipNode
def visit_image(self, node):
if node.hasattr('alt'):
self.parent.append(nodes.Text(node['alt']))
raise nodes.SkipNode
def ignore_node_but_process_children(self, node):
raise nodes.SkipDeparture
visit_interpreted = ignore_node_but_process_children
visit_problematic = ignore_node_but_process_children
visit_reference = ignore_node_but_process_children
visit_target = ignore_node_but_process_children
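
# Editorial sketch (not part of the commit): exercising SectNum by hand.  A
# "pending" placeholder carrying the transform and its options is left in the
# tree (normally by a directive); the transform numbers the section titles
# and removes the placeholder.  The ``Transform(document, startnode)``
# constructor signature is assumed here.
from docutils import nodes, utils
from docutils.transforms import parts

document = utils.new_document('<sketch>')
section = nodes.section()
section += nodes.title('', 'Introduction')
document += section

pending = nodes.pending(parts.SectNum, {'depth': 2})
document.insert(0, pending)
document.note_pending(pending)

parts.SectNum(document, startnode=pending).apply()
print document.pformat()   # the title now begins with a generated "1" prefix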

276
docutils/transforms/peps.py Normal file
View File

@ -0,0 +1,276 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import time
from docutils import nodes, utils
from docutils import ApplicationError, DataError
from docutils.transforms import Transform, TransformError
from docutils.transforms import parts, references
class Headers(Transform):
"""
Process fields in a PEP's initial RFC-2822 header.
"""
default_priority = 360
pep_url = 'pep-%04d.html'
pep_cvs_url = ('http://cvs.sourceforge.net/cgi-bin/viewcvs.cgi/python/'
'python/nondist/peps/pep-%04d.txt')
rcs_keyword_substitutions = (
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)
def apply(self):
if not len(self.document):
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
header.get('class') != 'rfc2822':
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = title = None
for field in header:
if field[0].astext().lower() == 'pep': # should be the first field
value = field[1].astext()
try:
pep = int(value)
cvs_url = self.pep_cvs_url % pep
except ValueError:
pep = value
cvs_url = None
msg = self.document.reporter.warning(
'"PEP" header must contain an integer; "%s" is an '
'invalid value.' % pep, base_node=field)
msgid = self.document.set_id(msg)
prb = nodes.problematic(value, value or '(none)',
refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
if len(field[1]):
field[1][0][:] = [prb]
else:
field[1] += nodes.paragraph('', '', prb)
break
if pep is None:
raise DataError('Document does not contain an RFC-2822 "PEP" '
'header.')
if pep == 0:
# Special processing for PEP 0.
pending = nodes.pending(PEPZero)
self.document.insert(1, pending)
self.document.note_pending(pending)
for field in header:
name = field[0].astext().lower()
body = field[1]
if len(body) > 1:
raise DataError('PEP header field body contains multiple '
'elements:\n%s' % field.pformat(level=1))
elif len(body) == 1:
if not isinstance(body[0], nodes.paragraph):
raise DataError('PEP header field body may only contain '
'a single paragraph:\n%s'
% field.pformat(level=1))
elif name == 'last-modified':
date = time.strftime(
'%d-%b-%Y',
time.localtime(os.stat(self.document['source'])[8]))
if cvs_url:
body += nodes.paragraph(
'', '', nodes.reference('', date, refuri=cvs_url))
else:
# empty
continue
para = body[0]
if name == 'author':
for node in para:
if isinstance(node, nodes.reference):
node.parent.replace(node, mask_email(node))
elif name == 'discussions-to':
for node in para:
if isinstance(node, nodes.reference):
node.parent.replace(node, mask_email(node, pep))
elif name in ('replaces', 'replaced-by', 'requires'):
newbody = []
space = nodes.Text(' ')
for refpep in re.split(',?\s+', body.astext()):
pepno = int(refpep)
newbody.append(nodes.reference(
refpep, refpep, refuri=self.pep_url % pepno))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
if cvs_url:
date = para.astext()
para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type':
pep_type = para.astext()
uri = self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):
"""
Insert a table of contents transform placeholder into the document after
the RFC 2822 header.
"""
default_priority = 380
def apply(self):
pending = nodes.pending(parts.Contents, {'title': None})
self.document.insert(1, pending)
self.document.note_pending(pending)
class TargetNotes(Transform):
"""
Locate the "References" section, insert a placeholder for an external
target footnote insertion transform at the end, and run the transform.
"""
default_priority = 520
def apply(self):
doc = self.document
i = len(doc) - 1
refsect = copyright = None
while i >= 0 and isinstance(doc[i], nodes.section):
title_words = doc[i][0].astext().lower().split()
if 'references' in title_words:
refsect = doc[i]
break
elif 'copyright' in title_words:
copyright = i
i -= 1
if not refsect:
refsect = nodes.section()
refsect += nodes.title('', 'References')
if copyright:
# Put the new "References" section before "Copyright":
doc.insert(copyright, refsect)
else:
# Put the new "References" section at end of doc:
doc.append(refsect)
pending = nodes.pending(references.TargetNotes)
refsect.append(pending)
self.document.note_pending(pending, 0)
class PEPZero(Transform):
"""
Special processing for PEP 0.
"""
    default_priority = 760
def apply(self):
visitor = PEPZeroSpecial(self.document)
self.document.walk(visitor)
self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):
"""
Perform the special processing needed by PEP 0:
- Mask email addresses.
- Link PEP numbers in the second column of 4-column tables to the PEPs
themselves.
"""
pep_url = Headers.pep_url
def unknown_visit(self, node):
pass
def visit_reference(self, node):
node.parent.replace(node, mask_email(node))
def visit_field_list(self, node):
if node.hasattr('class') and node['class'] == 'rfc2822':
raise nodes.SkipNode
def visit_tgroup(self, node):
self.pep_table = node['cols'] == 4
self.entry = 0
def visit_colspec(self, node):
self.entry += 1
if self.pep_table and self.entry == 2:
node['class'] = 'num'
def visit_row(self, node):
self.entry = 0
def visit_entry(self, node):
self.entry += 1
if self.pep_table and self.entry == 2 and len(node) == 1:
node['class'] = 'num'
p = node[0]
if isinstance(p, nodes.paragraph) and len(p) == 1:
text = p.astext()
try:
pep = int(text)
ref = self.pep_url % pep
p[0] = nodes.reference(text, text, refuri=ref)
except ValueError:
pass
non_masked_addresses = ('peps@python.org',
'python-list@python.org',
'python-dev@python.org')
def mask_email(ref, pepno=None):
"""
Mask the email address in `ref` and return a replacement node.
`ref` is returned unchanged if it contains no email address.
For email addresses such as "user@host", mask the address as "user at
host" (text) to thwart simple email address harvesters (except for those
listed in `non_masked_addresses`). If a PEP number (`pepno`) is given,
return a reference including a default email subject.
"""
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
        if ref['refuri'][7:] in non_masked_addresses:  # strip 'mailto:' (7 chars)
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', '&#32;&#97;t&#32;')
replacement = nodes.raw('', replacement_text, format='html')
if pepno is None:
return replacement
else:
ref['refuri'] += '?subject=PEP%%20%s' % pepno
ref[:] = [replacement]
return ref
else:
return ref
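
# Editorial sketch (not part of the commit): the effect of mask_email on an
# ordinary "mailto:" reference (the address below is made up).
from docutils import nodes
from docutils.transforms import peps

ref = nodes.reference('', 'user@example.org',
                      refuri='mailto:user@example.org')
masked = peps.mask_email(ref)
# ``masked`` is a raw HTML node whose text reads
# "user&#32;&#97;t&#32;example.org", rendered as "user at example.org".
print masked.astext()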

739
docutils/transforms/references.py Normal file
View File

@ -0,0 +1,739 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Transforms for resolving references.
"""
__docformat__ = 'reStructuredText'
import sys
import re
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
indices = xrange(sys.maxint)
class ChainedTargets(Transform):
"""
Attributes "refuri" and "refname" are migrated from the final direct
target up the chain of contiguous adjacent internal targets, using
`ChainedTargetResolver`.
"""
default_priority = 420
def apply(self):
visitor = ChainedTargetResolver(self.document)
self.document.walk(visitor)
class ChainedTargetResolver(nodes.SparseNodeVisitor):
"""
Copy reference attributes up the length of a hyperlink target chain.
"Chained targets" are multiple adjacent internal hyperlink targets which
"point to" an external or indirect target. After the transform, all
chained targets will effectively point to the same place.
Given the following ``document`` as input::
<document>
<target id="a" name="a">
<target id="b" name="b">
<target id="c" name="c" refuri="http://chained.external.targets">
<target id="d" name="d">
<paragraph>
I'm known as "d".
<target id="e" name="e">
<target id="id1">
<target id="f" name="f" refname="d">
``ChainedTargetResolver(document).walk()`` will transform the above into::
<document>
<target id="a" name="a" refuri="http://chained.external.targets">
<target id="b" name="b" refuri="http://chained.external.targets">
<target id="c" name="c" refuri="http://chained.external.targets">
<target id="d" name="d">
<paragraph>
I'm known as "d".
<target id="e" name="e" refname="d">
<target id="id1" refname="d">
<target id="f" name="f" refname="d">
"""
def unknown_visit(self, node):
pass
def visit_target(self, node):
if node.hasattr('refuri'):
attname = 'refuri'
call_if_named = self.document.note_external_target
elif node.hasattr('refname'):
attname = 'refname'
call_if_named = self.document.note_indirect_target
elif node.hasattr('refid'):
attname = 'refid'
call_if_named = None
else:
return
attval = node[attname]
index = node.parent.index(node)
for i in range(index - 1, -1, -1):
sibling = node.parent[i]
if not isinstance(sibling, nodes.target) \
or sibling.hasattr('refuri') \
or sibling.hasattr('refname') \
or sibling.hasattr('refid'):
break
sibling[attname] = attval
if sibling.hasattr('name') and call_if_named:
call_if_named(sibling)
class AnonymousHyperlinks(Transform):
"""
Link anonymous references to targets. Given::
<paragraph>
<reference anonymous="1">
internal
<reference anonymous="1">
external
<target anonymous="1" id="id1">
<target anonymous="1" id="id2" refuri="http://external">
Corresponding references are linked via "refid" or resolved via "refuri"::
<paragraph>
<reference anonymous="1" refid="id1">
                internal
<reference anonymous="1" refuri="http://external">
external
<target anonymous="1" id="id1">
<target anonymous="1" id="id2" refuri="http://external">
"""
default_priority = 440
def apply(self):
if len(self.document.anonymous_refs) \
!= len(self.document.anonymous_targets):
msg = self.document.reporter.error(
'Anonymous hyperlink mismatch: %s references but %s '
'targets.\nSee "backrefs" attribute for IDs.'
% (len(self.document.anonymous_refs),
len(self.document.anonymous_targets)))
msgid = self.document.set_id(msg)
for ref in self.document.anonymous_refs:
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.parent.replace(ref, prb)
return
for ref, target in zip(self.document.anonymous_refs,
self.document.anonymous_targets):
if target.hasattr('refuri'):
ref['refuri'] = target['refuri']
ref.resolved = 1
else:
ref['refid'] = target['id']
self.document.note_refid(ref)
target.referenced = 1
class IndirectHyperlinks(Transform):
"""
a) Indirect external references::
<paragraph>
<reference refname="indirect external">
indirect external
<target id="id1" name="direct external"
refuri="http://indirect">
<target id="id2" name="indirect external"
refname="direct external">
The "refuri" attribute is migrated back to all indirect targets
from the final direct target (i.e. a target not referring to
another indirect target)::
<paragraph>
<reference refname="indirect external">
indirect external
<target id="id1" name="direct external"
refuri="http://indirect">
<target id="id2" name="indirect external"
refuri="http://indirect">
Once the attribute is migrated, the preexisting "refname" attribute
is dropped.
b) Indirect internal references::
<target id="id1" name="final target">
<paragraph>
<reference refname="indirect internal">
indirect internal
<target id="id2" name="indirect internal 2"
refname="final target">
<target id="id3" name="indirect internal"
refname="indirect internal 2">
Targets which indirectly refer to an internal target become one-hop
indirect (their "refid" attributes are directly set to the internal
target's "id"). References which indirectly refer to an internal
target become direct internal references::
<target id="id1" name="final target">
<paragraph>
<reference refid="id1">
indirect internal
<target id="id2" name="indirect internal 2" refid="id1">
<target id="id3" name="indirect internal" refid="id1">
"""
default_priority = 460
def apply(self):
for target in self.document.indirect_targets:
if not target.resolved:
self.resolve_indirect_target(target)
self.resolve_indirect_references(target)
def resolve_indirect_target(self, target):
refname = target['refname']
reftarget_id = self.document.nameids.get(refname)
if not reftarget_id:
self.nonexistent_indirect_target(target)
return
reftarget = self.document.ids[reftarget_id]
if isinstance(reftarget, nodes.target) \
and not reftarget.resolved and reftarget.hasattr('refname'):
self.one_indirect_target(reftarget) # multiply indirect
if reftarget.hasattr('refuri'):
target['refuri'] = reftarget['refuri']
if target.hasattr('name'):
self.document.note_external_target(target)
elif reftarget.hasattr('refid'):
target['refid'] = reftarget['refid']
self.document.note_refid(target)
else:
try:
target['refid'] = reftarget['id']
self.document.note_refid(target)
except KeyError:
self.nonexistent_indirect_target(target)
return
del target['refname']
target.resolved = 1
reftarget.referenced = 1
def nonexistent_indirect_target(self, target):
naming = ''
if target.hasattr('name'):
naming = '"%s" ' % target['name']
reflist = self.document.refnames.get(target['name'], [])
else:
reflist = self.document.refids.get(target['id'], [])
naming += '(id="%s")' % target['id']
msg = self.document.reporter.warning(
'Indirect hyperlink target %s refers to target "%s", '
'which does not exist.' % (naming, target['refname']),
base_node=target)
msgid = self.document.set_id(msg)
for ref in reflist:
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.parent.replace(ref, prb)
target.resolved = 1
def resolve_indirect_references(self, target):
if target.hasattr('refid'):
attname = 'refid'
call_if_named = 0
call_method = self.document.note_refid
elif target.hasattr('refuri'):
attname = 'refuri'
call_if_named = 1
call_method = self.document.note_external_target
else:
return
attval = target[attname]
if target.hasattr('name'):
name = target['name']
try:
reflist = self.document.refnames[name]
except KeyError, instance:
if target.referenced:
return
msg = self.document.reporter.info(
'Indirect hyperlink target "%s" is not referenced.'
% name, base_node=target)
target.referenced = 1
return
delatt = 'refname'
else:
id = target['id']
try:
reflist = self.document.refids[id]
except KeyError, instance:
if target.referenced:
return
msg = self.document.reporter.info(
'Indirect hyperlink target id="%s" is not referenced.'
% id, base_node=target)
target.referenced = 1
return
delatt = 'refid'
for ref in reflist:
if ref.resolved:
continue
del ref[delatt]
ref[attname] = attval
if not call_if_named or ref.hasattr('name'):
call_method(ref)
ref.resolved = 1
if isinstance(ref, nodes.target):
self.resolve_indirect_references(ref)
target.referenced = 1
class ExternalTargets(Transform):
"""
Given::
<paragraph>
<reference refname="direct external">
direct external
<target id="id1" name="direct external" refuri="http://direct">
The "refname" attribute is replaced by the direct "refuri" attribute::
<paragraph>
<reference refuri="http://direct">
direct external
<target id="id1" name="direct external" refuri="http://direct">
"""
default_priority = 640
def apply(self):
for target in self.document.external_targets:
if target.hasattr('refuri') and target.hasattr('name'):
name = target['name']
refuri = target['refuri']
try:
reflist = self.document.refnames[name]
except KeyError, instance:
if target.referenced:
continue
msg = self.document.reporter.info(
'External hyperlink target "%s" is not referenced.'
% name, base_node=target)
target.referenced = 1
continue
for ref in reflist:
if ref.resolved:
continue
del ref['refname']
ref['refuri'] = refuri
ref.resolved = 1
target.referenced = 1
class InternalTargets(Transform):
"""
Given::
<paragraph>
<reference refname="direct internal">
direct internal
<target id="id1" name="direct internal">
The "refname" attribute is replaced by "refid" linking to the target's
"id"::
<paragraph>
<reference refid="id1">
direct internal
<target id="id1" name="direct internal">
"""
default_priority = 660
def apply(self):
for target in self.document.internal_targets:
if target.hasattr('refuri') or target.hasattr('refid') \
or not target.hasattr('name'):
continue
name = target['name']
refid = target['id']
try:
reflist = self.document.refnames[name]
except KeyError, instance:
if target.referenced:
continue
msg = self.document.reporter.info(
'Internal hyperlink target "%s" is not referenced.'
% name, base_node=target)
target.referenced = 1
continue
for ref in reflist:
if ref.resolved:
continue
del ref['refname']
ref['refid'] = refid
ref.resolved = 1
target.referenced = 1
class Footnotes(Transform):
"""
Assign numbers to autonumbered footnotes, and resolve links to footnotes,
citations, and their references.
Given the following ``document`` as input::
<document>
<paragraph>
                A labeled autonumbered footnote reference:
<footnote_reference auto="1" id="id1" refname="footnote">
<paragraph>
                An unlabeled autonumbered footnote reference:
<footnote_reference auto="1" id="id2">
<footnote auto="1" id="id3">
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" id="footnote" name="footnote">
<paragraph>
Labeled autonumbered footnote.
Auto-numbered footnotes have attribute ``auto="1"`` and no label.
Auto-numbered footnote_references have no reference text (they're
empty elements). When resolving the numbering, a ``label`` element
is added to the beginning of the ``footnote``, and reference text
to the ``footnote_reference``.
The transformed result will be::
<document>
<paragraph>
                A labeled autonumbered footnote reference:
<footnote_reference auto="1" id="id1" refid="footnote">
2
<paragraph>
                An unlabeled autonumbered footnote reference:
<footnote_reference auto="1" id="id2" refid="id3">
1
<footnote auto="1" id="id3" backrefs="id2">
<label>
1
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" id="footnote" name="footnote" backrefs="id1">
<label>
2
<paragraph>
Labeled autonumbered footnote.
Note that the footnotes are not in the same order as the references.
The labels and reference text are added to the auto-numbered ``footnote``
and ``footnote_reference`` elements. Footnote elements are backlinked to
    their references via "backrefs" attributes.  References are assigned "id"
and "refid" attributes.
After adding labels and reference text, the "auto" attributes can be
ignored.
"""
default_priority = 620
autofootnote_labels = None
"""Keep track of unlabeled autonumbered footnotes."""
symbols = [
# Entries 1-4 and 6 below are from section 12.51 of
# The Chicago Manual of Style, 14th edition.
'*', # asterisk/star
u'\u2020', # dagger &dagger;
u'\u2021', # double dagger &Dagger;
u'\u00A7', # section mark &sect;
u'\u00B6', # paragraph mark (pilcrow) &para;
# (parallels ['||'] in CMoS)
'#', # number sign
# The entries below were chosen arbitrarily.
u'\u2660', # spade suit &spades;
u'\u2665', # heart suit &hearts;
u'\u2666', # diamond suit &diams;
u'\u2663', # club suit &clubs;
]
def apply(self):
self.autofootnote_labels = []
startnum = self.document.autofootnote_start
self.document.autofootnote_start = self.number_footnotes(startnum)
self.number_footnote_references(startnum)
self.symbolize_footnotes()
self.resolve_footnotes_and_citations()
def number_footnotes(self, startnum):
"""
Assign numbers to autonumbered footnotes.
For labeled autonumbered footnotes, copy the number over to
corresponding footnote references.
"""
for footnote in self.document.autofootnotes:
while 1:
label = str(startnum)
startnum += 1
if not self.document.nameids.has_key(label):
break
footnote.insert(0, nodes.label('', label))
if footnote.hasattr('dupname'):
continue
if footnote.hasattr('name'):
name = footnote['name']
for ref in self.document.footnote_refs.get(name, []):
ref += nodes.Text(label)
ref.delattr('refname')
ref['refid'] = footnote['id']
footnote.add_backref(ref['id'])
self.document.note_refid(ref)
ref.resolved = 1
else:
footnote['name'] = label
self.document.note_explicit_target(footnote, footnote)
self.autofootnote_labels.append(label)
return startnum
def number_footnote_references(self, startnum):
"""Assign numbers to autonumbered footnote references."""
i = 0
for ref in self.document.autofootnote_refs:
if ref.resolved or ref.hasattr('refid'):
continue
try:
label = self.autofootnote_labels[i]
except IndexError:
msg = self.document.reporter.error(
'Too many autonumbered footnote references: only %s '
'corresponding footnotes available.'
% len(self.autofootnote_labels), base_node=ref)
msgid = self.document.set_id(msg)
for ref in self.document.autofootnote_refs[i:]:
if ref.resolved or ref.hasattr('refname'):
continue
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.parent.replace(ref, prb)
break
ref += nodes.Text(label)
id = self.document.nameids[label]
footnote = self.document.ids[id]
ref['refid'] = id
self.document.note_refid(ref)
footnote.add_backref(ref['id'])
ref.resolved = 1
i += 1
def symbolize_footnotes(self):
"""Add symbols indexes to "[*]"-style footnotes and references."""
labels = []
for footnote in self.document.symbol_footnotes:
reps, index = divmod(self.document.symbol_footnote_start,
len(self.symbols))
labeltext = self.symbols[index] * (reps + 1)
labels.append(labeltext)
footnote.insert(0, nodes.label('', labeltext))
self.document.symbol_footnote_start += 1
self.document.set_id(footnote)
i = 0
for ref in self.document.symbol_footnote_refs:
try:
ref += nodes.Text(labels[i])
except IndexError:
msg = self.document.reporter.error(
'Too many symbol footnote references: only %s '
'corresponding footnotes available.' % len(labels),
base_node=ref)
msgid = self.document.set_id(msg)
for ref in self.document.symbol_footnote_refs[i:]:
if ref.resolved or ref.hasattr('refid'):
continue
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.parent.replace(ref, prb)
break
footnote = self.document.symbol_footnotes[i]
ref['refid'] = footnote['id']
self.document.note_refid(ref)
footnote.add_backref(ref['id'])
i += 1
def resolve_footnotes_and_citations(self):
"""
Link manually-labeled footnotes and citations to/from their
references.
"""
for footnote in self.document.footnotes:
label = footnote['name']
if self.document.footnote_refs.has_key(label):
reflist = self.document.footnote_refs[label]
self.resolve_references(footnote, reflist)
for citation in self.document.citations:
label = citation['name']
if self.document.citation_refs.has_key(label):
reflist = self.document.citation_refs[label]
self.resolve_references(citation, reflist)
def resolve_references(self, note, reflist):
id = note['id']
for ref in reflist:
if ref.resolved:
continue
ref.delattr('refname')
ref['refid'] = id
note.add_backref(ref['id'])
ref.resolved = 1
note.resolved = 1
class Substitutions(Transform):
"""
Given the following ``document`` as input::
<document>
<paragraph>
The
<substitution_reference refname="biohazard">
biohazard
symbol is deservedly scary-looking.
<substitution_definition name="biohazard">
<image alt="biohazard" uri="biohazard.png">
The ``substitution_reference`` will simply be replaced by the
contents of the corresponding ``substitution_definition``.
The transformed result will be::
<document>
<paragraph>
The
<image alt="biohazard" uri="biohazard.png">
symbol is deservedly scary-looking.
<substitution_definition name="biohazard">
<image alt="biohazard" uri="biohazard.png">
"""
default_priority = 220
"""The Substitutions transform has to be applied very early, before
    `docutils.transforms.frontmatter.DocTitle` and others."""
def apply(self):
defs = self.document.substitution_defs
for refname, refs in self.document.substitution_refs.items():
for ref in refs:
if defs.has_key(refname):
ref.parent.replace(ref, defs[refname].get_children())
else:
msg = self.document.reporter.error(
'Undefined substitution referenced: "%s".'
% refname, base_node=ref)
msgid = self.document.set_id(msg)
prb = nodes.problematic(
ref.rawsource, ref.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
ref.parent.replace(ref, prb)
self.document.substitution_refs = None # release replaced references
class TargetNotes(Transform):
"""
Creates a footnote for each external target in the text, and corresponding
footnote references after each reference.
"""
default_priority = 540
"""The TargetNotes transform has to be applied after `IndirectHyperlinks`
but before `Footnotes`."""
def apply(self):
notes = {}
nodelist = []
for target in self.document.external_targets:
name = target.get('name')
if not name:
print >>sys.stderr, 'no name on target: %r' % target
continue
refs = self.document.refnames.get(name, [])
if not refs:
continue
footnote = self.make_target_footnote(target, refs, notes)
if not notes.has_key(target['refuri']):
notes[target['refuri']] = footnote
nodelist.append(footnote)
if len(self.document.anonymous_targets) \
== len(self.document.anonymous_refs):
for target, ref in zip(self.document.anonymous_targets,
self.document.anonymous_refs):
if target.hasattr('refuri'):
footnote = self.make_target_footnote(target, [ref], notes)
if not notes.has_key(target['refuri']):
notes[target['refuri']] = footnote
nodelist.append(footnote)
self.startnode.parent.replace(self.startnode, nodelist)
def make_target_footnote(self, target, refs, notes):
refuri = target['refuri']
if notes.has_key(refuri): # duplicate?
footnote = notes[refuri]
footnote_name = footnote['name']
else: # original
footnote = nodes.footnote()
footnote_id = self.document.set_id(footnote)
# Use a colon; they can't be produced inside names by the parser:
footnote_name = 'target_note: ' + footnote_id
footnote['auto'] = 1
footnote['name'] = footnote_name
footnote_paragraph = nodes.paragraph()
footnote_paragraph += nodes.reference('', refuri, refuri=refuri)
footnote += footnote_paragraph
self.document.note_autofootnote(footnote)
self.document.note_explicit_target(footnote, footnote)
for ref in refs:
if isinstance(ref, nodes.target):
continue
refnode = nodes.footnote_reference(
refname=footnote_name, auto=1)
self.document.note_autofootnote_ref(refnode)
self.document.note_footnote_ref(refnode)
index = ref.parent.index(ref) + 1
reflist = [nodes.Text(' '), refnode]
ref.parent.insert(index, reflist)
return footnote
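
# Editorial note (not part of the commit): the default_priority values in
# this module determine the relative order in which the transforms run
# (lower numbers run first):
priorities = [
    (220, 'Substitutions'),
    (420, 'ChainedTargets'),
    (440, 'AnonymousHyperlinks'),
    (460, 'IndirectHyperlinks'),
    (540, 'TargetNotes'),
    (620, 'Footnotes'),
    (640, 'ExternalTargets'),
    (660, 'InternalTargets'),
]
priorities.sort()
print [name for priority, name in priorities]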

185
docutils/transforms/universal.py Normal file
View File

@ -0,0 +1,185 @@
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Transforms needed by most or all documents:
- `Decorations`: Generate a document's header & footer.
- `Messages`: Placement of system messages stored in
`nodes.document.transform_messages`.
- `TestMessages`: Like `Messages`, used on test runs.
- `FinalChecks`: Perform last-minute checks, e.g. for dangling references.
"""
__docformat__ = 'reStructuredText'
import re
import sys
import time
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
class Decorations(Transform):
"""
Populate a document's decoration element (header, footer).
"""
default_priority = 820
def apply(self):
header = self.generate_header()
footer = self.generate_footer()
if header or footer:
decoration = nodes.decoration()
decoration += header
decoration += footer
document = self.document
index = document.first_child_not_matching_class(
nodes.PreDecorative)
if index is None:
document += decoration
else:
document[index:index] = [decoration]
def generate_header(self):
return None
def generate_footer(self):
# @@@ Text is hard-coded for now.
# Should be made dynamic (language-dependent).
settings = self.document.settings
if settings.generator or settings.datestamp or settings.source_link \
or settings.source_url:
text = []
if settings.source_link and settings._source \
or settings.source_url:
if settings.source_url:
source = settings.source_url
else:
source = utils.relative_path(settings._destination,
settings._source)
text.extend([
nodes.reference('', 'View document source',
refuri=source),
nodes.Text('.\n')])
if settings.datestamp:
datestamp = time.strftime(settings.datestamp, time.gmtime())
text.append(nodes.Text('Generated on: ' + datestamp + '.\n'))
if settings.generator:
text.extend([
nodes.Text('Generated by '),
nodes.reference('', 'Docutils', refuri=
'http://docutils.sourceforge.net/'),
nodes.Text(' from '),
nodes.reference('', 'reStructuredText', refuri='http://'
'docutils.sourceforge.net/rst.html'),
nodes.Text(' source.\n')])
footer = nodes.footer()
footer += nodes.paragraph('', '', *text)
return footer
else:
return None
class Messages(Transform):
"""
Place any system messages generated after parsing into a dedicated section
of the document.
"""
default_priority = 860
def apply(self):
unfiltered = self.document.transform_messages
threshold = self.document.reporter['writer'].report_level
messages = []
for msg in unfiltered:
if msg['level'] >= threshold and not msg.parent:
messages.append(msg)
if len(messages) > 0:
section = nodes.section(CLASS='system-messages')
# @@@ get this from the language module?
section += nodes.title('', 'Docutils System Messages')
section += messages
self.document.transform_messages[:] = []
self.document += section
class TestMessages(Transform):
"""
Append all post-parse system messages to the end of the document.
"""
default_priority = 890
def apply(self):
for msg in self.document.transform_messages:
if not msg.parent:
self.document += msg
class FinalChecks(Transform):
"""
Perform last-minute checks.
- Check for dangling references (incl. footnote & citation).
"""
default_priority = 840
def apply(self):
visitor = FinalCheckVisitor(self.document)
self.document.walk(visitor)
if self.document.settings.expose_internals:
visitor = InternalAttributeExposer(self.document)
self.document.walk(visitor)
class FinalCheckVisitor(nodes.SparseNodeVisitor):
def unknown_visit(self, node):
pass
def visit_reference(self, node):
if node.resolved or not node.hasattr('refname'):
return
refname = node['refname']
id = self.document.nameids.get(refname)
if id is None:
msg = self.document.reporter.error(
'Unknown target name: "%s".' % (node['refname']),
base_node=node)
msgid = self.document.set_id(msg)
prb = nodes.problematic(
node.rawsource, node.rawsource, refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
node.parent.replace(node, prb)
else:
del node['refname']
node['refid'] = id
self.document.ids[id].referenced = 1
node.resolved = 1
visit_footnote_reference = visit_citation_reference = visit_reference
class InternalAttributeExposer(nodes.GenericNodeVisitor):
def __init__(self, document):
nodes.GenericNodeVisitor.__init__(self, document)
self.internal_attributes = document.settings.expose_internals
def default_visit(self, node):
for att in self.internal_attributes:
value = getattr(node, att, None)
if value is not None:
node['internal:' + att] = value
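
# Editorial sketch (not part of the commit): the effect of the
# expose_internals setting handled by FinalChecks above.
from docutils import nodes, utils
from docutils.transforms import universal

document = utils.new_document('<sketch>')
document.settings.expose_internals = ['line']
paragraph = nodes.paragraph('', 'some text')
paragraph.line = 42                      # normally recorded by the parser
document += paragraph

visitor = universal.InternalAttributeExposer(document)
document.walk(visitor)
print paragraph['internal:line']         # -> 42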

105
docutils/urischemes.py Normal file
View File

@ -0,0 +1,105 @@
"""
`schemes` is a dictionary with lowercase URI addressing schemes as
keys and descriptions as values. It was compiled from the index at
http://www.w3.org/Addressing/schemes.html (revised 2001-08-20).
"""
# Many values are blank and should be filled in with useful descriptions.
schemes = {
'about': 'provides information on Navigator',
'acap': 'Application Configuration Access Protocol',
'addbook': "To add vCard entries to Communicator's Address Book",
'afp': 'Apple Filing Protocol',
'afs': 'Andrew File System global file names',
'aim': 'AOL Instant Messenger',
'callto': 'for NetMeeting links',
'castanet': 'Castanet Tuner URLs for Netcaster',
'chttp': 'cached HTTP supported by RealPlayer',
'cid': 'content identifier',
'data': ('allows inclusion of small data items as "immediate" data; '
'RFC 2397'),
'dav': 'Distributed Authoring and Versioning Protocol; RFC 2518',
'dns': 'Domain Name System resources',
'eid': ('External ID; non-URL data; general escape mechanism to allow '
'access to information for applications that are too '
'specialized to justify their own schemes'),
'fax': ('a connection to a terminal that can handle telefaxes '
'(facsimiles); RFC 2806'),
'file': 'Host-specific file names',
'finger': '',
'freenet': '',
'ftp': 'File Transfer Protocol',
'gopher': 'The Gopher Protocol',
'gsm-sms': ('Global System for Mobile Communications Short Message '
'Service'),
'h323': 'video (audiovisual) communication on local area networks',
'h324': ('video and audio communications over low bitrate connections '
'such as POTS modem connections'),
'hdl': 'CNRI handle system',
'hnews': 'an HTTP-tunneling variant of the NNTP news protocol',
'http': 'Hypertext Transfer Protocol',
'https': 'HTTP over SSL',
'iioploc': 'Internet Inter-ORB Protocol Location?',
'ilu': 'Inter-Language Unification',
'imap': 'Internet Message Access Protocol',
'ior': 'CORBA interoperable object reference',
'ipp': 'Internet Printing Protocol',
'irc': 'Internet Relay Chat',
'jar': 'Java archive',
'javascript': ('JavaScript code; evaluates the expression after the '
'colon'),
'jdbc': '',
'ldap': 'Lightweight Directory Access Protocol',
'lifn': '',
'livescript': '',
'lrq': '',
'mailbox': 'Mail folder access',
'mailserver': 'Access to data available from mail servers',
'mailto': 'Electronic mail address',
'md5': '',
'mid': 'message identifier',
'mocha': '',
'modem': ('a connection to a terminal that can handle incoming data '
'calls; RFC 2806'),
'news': 'USENET news',
'nfs': 'Network File System protocol',
'nntp': 'USENET news using NNTP access',
'opaquelocktoken': '',
'phone': '',
'pop': 'Post Office Protocol',
'pop3': 'Post Office Protocol v3',
'printer': '',
'prospero': 'Prospero Directory Service',
'res': '',
'rtsp': 'real time streaming protocol',
'rvp': '',
'rwhois': '',
'rx': 'Remote Execution',
'sdp': '',
'service': 'service location',
'shttp': 'secure hypertext transfer protocol',
'sip': 'Session Initiation Protocol',
'smb': '',
'snews': 'For NNTP postings via SSL',
't120': 'real time data conferencing (audiographics)',
'tcp': '',
'tel': ('a connection to a terminal that handles normal voice '
'telephone calls, a voice mailbox or another voice messaging '
'system or a service that can be operated using DTMF tones; '
'RFC 2806.'),
'telephone': 'telephone',
'telnet': 'Reference to interactive sessions',
'tip': 'Transaction Internet Protocol',
'tn3270': 'Interactive 3270 emulation sessions',
'tv': '',
'urn': 'Uniform Resource Name',
'uuid': '',
'vemmi': 'versatile multimedia interface',
'videotex': '',
'view-source': 'displays HTML code that was generated with JavaScript',
'wais': 'Wide Area Information Servers',
'whodp': '',
'whois++': 'Distributed directory service.',
'z39.50r': 'Z39.50 Retrieval',
'z39.50s': 'Z39.50 Session',}
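
# Editorial sketch (not part of the commit): typical use of the mapping.
from docutils import urischemes
print urischemes.schemes['mailto']                       # -> 'Electronic mail address'
print urischemes.schemes.get('xyzzy', 'unknown scheme')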

426
docutils/utils.py Normal file
View File

@ -0,0 +1,426 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous utilities for the documentation utilities.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import os.path
from types import StringType, UnicodeType
from docutils import ApplicationError, DataError
from docutils import frontend, nodes
class SystemMessage(ApplicationError):
def __init__(self, system_message):
Exception.__init__(self, system_message.astext())
class Reporter:
"""
Info/warning/error reporter and ``system_message`` element generator.
Five levels of system messages are defined, along with corresponding
methods: `debug()`, `info()`, `warning()`, `error()`, and `severe()`.
There is typically one Reporter object per process. A Reporter object is
instantiated with thresholds for reporting (generating warnings) and
halting processing (raising exceptions), a switch to turn debug output on
or off, and an I/O stream for warnings. These are stored in the default
reporting category, '' (zero-length string).
Multiple reporting categories [#]_ may be set, each with its own reporting
and halting thresholds, debugging switch, and warning stream
(collectively a `ConditionSet`). Categories are hierarchical dotted-name
strings that look like attribute references: 'spam', 'spam.eggs',
'neeeow.wum.ping'. The 'spam' category is the ancestor of
'spam.bacon.eggs'. Unset categories inherit stored conditions from their
closest ancestor category that has been set.
When a system message is generated, the stored conditions from its
category (or ancestor if unset) are retrieved. The system message level
is compared to the thresholds stored in the category, and a warning or
error is generated as appropriate. Debug messages are produced iff the
stored debug switch is on. Message output is sent to the stored warning
stream.
The default category is '' (empty string). By convention, Writers should
retrieve reporting conditions from the 'writer' category (which, unless
explicitly set, defaults to the conditions of the default category).
The Reporter class also employs a modified form of the "Observer" pattern
[GoF95]_ to track system messages generated. The `attach_observer` method
should be called before parsing, with a bound method or function which
accepts system messages. The observer can be removed with
`detach_observer`, and another added in its place.
.. [#] The concept of "categories" was inspired by the log4j project:
http://jakarta.apache.org/log4j/.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
levels = 'DEBUG INFO WARNING ERROR SEVERE'.split()
"""List of names for system message levels, indexed by level."""
def __init__(self, source, report_level, halt_level, stream=None,
debug=0):
"""
        Initialize the `ConditionSet` for the `Reporter`'s default category.
:Parameters:
- `source`: The path to or description of the source data.
- `report_level`: The level at or above which warning output will
be sent to `stream`.
- `halt_level`: The level at or above which `SystemMessage`
exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing), or
`None` (implies `sys.stderr`; default).
"""
self.source = source
"""The path to or description of the source data."""
if stream is None:
stream = sys.stderr
elif type(stream) in (StringType, UnicodeType):
raise NotImplementedError('This should open a file for writing.')
self.categories = {'': ConditionSet(debug, report_level, halt_level,
stream)}
"""Mapping of category names to conditions. Default category is ''."""
self.observers = []
"""List of bound methods or functions to call with each system_message
created."""
def set_conditions(self, category, report_level, halt_level,
stream=None, debug=0):
if stream is None:
stream = sys.stderr
self.categories[category] = ConditionSet(debug, report_level,
halt_level, stream)
def unset_conditions(self, category):
if category and self.categories.has_key(category):
del self.categories[category]
__delitem__ = unset_conditions
def get_conditions(self, category):
while not self.categories.has_key(category):
category = category[:category.rfind('.') + 1][:-1]
return self.categories[category]
__getitem__ = get_conditions
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes one
argument, a `nodes.system_message` instance.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self, message):
for observer in self.observers:
observer(message)
def system_message(self, level, message, *children, **kwargs):
"""
Return a system_message object.
Raise an exception or generate a warning if appropriate.
"""
attributes = kwargs.copy()
category = kwargs.get('category', '')
if kwargs.has_key('category'):
del attributes['category']
if kwargs.has_key('base_node'):
source, line = get_source_line(kwargs['base_node'])
del attributes['base_node']
if source is not None:
attributes.setdefault('source', source)
if line is not None:
attributes.setdefault('line', line)
attributes.setdefault('source', self.source)
msg = nodes.system_message(message, level=level,
type=self.levels[level],
*children, **attributes)
debug, report_level, halt_level, stream = self[category].astuple()
if level >= report_level or debug and level == 0:
if category:
print >>stream, msg.astext(), '[%s]' % category
else:
print >>stream, msg.astext()
if level >= halt_level:
raise SystemMessage(msg)
if level > 0 or debug:
self.notify_observers(msg)
return msg
def debug(self, *args, **kwargs):
"""
Level-0, "DEBUG": an internal reporting issue. Typically, there is no
effect on the processing. Level-0 system messages are handled
separately from the others.
"""
return self.system_message(0, *args, **kwargs)
def info(self, *args, **kwargs):
"""
Level-1, "INFO": a minor issue that can be ignored. Typically there is
no effect on processing, and level-1 system messages are not reported.
"""
return self.system_message(1, *args, **kwargs)
def warning(self, *args, **kwargs):
"""
Level-2, "WARNING": an issue that should be addressed. If ignored,
there may be unpredictable problems with the output.
"""
return self.system_message(2, *args, **kwargs)
def error(self, *args, **kwargs):
"""
Level-3, "ERROR": an error that should be addressed. If ignored, the
output will contain errors.
"""
return self.system_message(3, *args, **kwargs)
def severe(self, *args, **kwargs):
"""
Level-4, "SEVERE": a severe error that must be addressed. If ignored,
the output will contain severe errors. Typically level-4 system
messages are turned into exceptions which halt processing.
"""
return self.system_message(4, *args, **kwargs)
class ConditionSet:
"""
A set of two thresholds (`report_level` & `halt_level`), a switch
(`debug`), and an I/O stream (`stream`), corresponding to one `Reporter`
category.
"""
def __init__(self, debug, report_level, halt_level, stream):
self.debug = debug
self.report_level = report_level
self.halt_level = halt_level
self.stream = stream
def astuple(self):
return (self.debug, self.report_level, self.halt_level,
self.stream)
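
# Editorial sketch (not part of the commit): typical use of the Reporter and
# the per-category conditions described above.  Thresholds follow the level
# numbers of the methods (1=info, 2=warning, 3=error, 4=severe).
reporter = Reporter('<sketch>', report_level=2, halt_level=4)
reporter.set_conditions('writer', report_level=3, halt_level=4)

reporter.warning('default category: printed to the warning stream')
reporter.warning('writer category: suppressed by its higher threshold',
                 category='writer')
reporter.info('below the default report level, so not printed at all')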
class ExtensionOptionError(DataError): pass
class BadOptionError(ExtensionOptionError): pass
class BadOptionDataError(ExtensionOptionError): pass
class DuplicateOptionError(ExtensionOptionError): pass
def extract_extension_options(field_list, options_spec):
"""
Return a dictionary mapping extension option names to converted values.
:Parameters:
- `field_list`: A flat field list without field arguments, where each
field body consists of a single paragraph only.
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion
function).
- `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = extract_options(field_list)
option_dict = assemble_option_dict(option_list, options_spec)
return option_dict
def extract_options(field_list):
"""
Return a list of option (name, value) pairs from field names & bodies.
:Parameter:
`field_list`: A flat field list, where each field name is a single
word and each field body consists of a single paragraph only.
:Exceptions:
- `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name,
missing data, bad quotes, etc.).
"""
option_list = []
for field in field_list:
if len(field[0].astext().split()) != 1:
raise BadOptionError(
'extension option field name may not contain multiple words')
name = str(field[0].astext().lower())
body = field[1]
if len(body) == 0:
data = None
elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
else:
data = body[0][0].astext()
option_list.append((name, data))
return option_list
def assemble_option_dict(option_list, options_spec):
"""
Return a mapping of option names to values.
:Parameters:
- `option_list`: A list of (name, value) pairs (the output of
`extract_options()`).
- `options_spec`: Dictionary mapping known option names to a
conversion function such as `int` or `float`.
:Exceptions:
- `KeyError` for unknown option names.
- `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion
function).
"""
options = {}
for name, value in option_list:
convertor = options_spec[name] # raises KeyError if unknown
if options.has_key(name):
raise DuplicateOptionError('duplicate option "%s"' % name)
try:
options[name] = convertor(value)
except (ValueError, TypeError), detail:
raise detail.__class__('(option: "%s"; value: %r)\n%s'
% (name, value, detail))
return options
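
# Editorial sketch (not part of the commit): extracting options from a tiny
# hand-built field list with the functions above.
field = nodes.field('',
                    nodes.field_name('', 'scale'),
                    nodes.field_body('', nodes.paragraph('', '75')))
field_list = nodes.field_list('', field)
print extract_extension_options(field_list, {'scale': int})   # -> {'scale': 75}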
class NameValueError(DataError): pass
def extract_name_value(line):
"""
Return a list of (name, value) from a line of the form "name=value ...".
:Exception:
`NameValueError` for invalid input (missing name, missing data, bad
quotes, etc.).
"""
attlist = []
while line:
equals = line.find('=')
if equals == -1:
raise NameValueError('missing "="')
attname = line[:equals].strip()
if equals == 0 or not attname:
raise NameValueError(
'missing attribute name before "="')
line = line[equals+1:].lstrip()
if not line:
raise NameValueError(
'missing value after "%s="' % attname)
if line[0] in '\'"':
endquote = line.find(line[0], 1)
if endquote == -1:
raise NameValueError(
'attribute "%s" missing end quote (%s)'
% (attname, line[0]))
if len(line) > endquote + 1 and line[endquote + 1].strip():
raise NameValueError(
'attribute "%s" end quote (%s) not followed by '
'whitespace' % (attname, line[0]))
data = line[1:endquote]
line = line[endquote+1:].lstrip()
else:
space = line.find(' ')
if space == -1:
data = line
line = ''
else:
data = line[:space]
line = line[space+1:].lstrip()
attlist.append((attname.lower(), data))
return attlist
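
# Editorial sketch (not part of the commit): behaviour of extract_name_value.
print extract_name_value('att1=val1 att2="val two"')
# -> [('att1', 'val1'), ('att2', 'val two')]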
def normalize_name(name):
"""Return a case- and whitespace-normalized name."""
return ' '.join(name.lower().split())
def new_document(source, settings=None):
if settings is None:
settings = frontend.OptionParser().get_default_values()
reporter = Reporter(source, settings.report_level, settings.halt_level,
settings.warning_stream, settings.debug)
document = nodes.document(settings, reporter, source=source)
document.note_source(source, -1)
return document
def clean_rcs_keywords(paragraph, keyword_substitutions):
if len(paragraph) == 1 and isinstance(paragraph[0], nodes.Text):
textnode = paragraph[0]
for pattern, substitution in keyword_substitutions:
match = pattern.match(textnode.data)
if match:
textnode.data = pattern.sub(substitution, textnode.data)
return
def relative_path(source, target):
"""
Build and return a path to `target`, relative to `source`.
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or '').split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
# Nothing in common between paths.
# Return absolute path, using '/' for URLs:
return '/'.join(target_parts)
source_parts.reverse()
target_parts.reverse()
while (source_parts and target_parts
and source_parts[-1] == target_parts[-1]):
# Remove path components in common:
source_parts.pop()
target_parts.pop()
target_parts.reverse()
parts = ['..'] * (len(source_parts) - 1) + target_parts
return '/'.join(parts)
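
# Editorial sketch (not part of the commit): behaviour of relative_path when
# both arguments are relative to the same directory.
print relative_path('spam/eggs.txt', 'spam/ham/sausage.txt')   # -> 'ham/sausage.txt'
print relative_path('spam/eggs.txt', 'beans/toast.txt')        # -> '../beans/toast.txt'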
def get_source_line(node):
"""
Return the "source" and "line" attributes from the `node` given or from
    its closest ancestor.
"""
while node:
if node.source or node.line:
return node.source, node.line
node = node.parent
return None, None

1
docutils/writers/.cvsignore Normal file
View File

@ -0,0 +1 @@
*.pyc

82
docutils/writers/__init__.py Normal file
View File

@ -0,0 +1,82 @@
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Writer modules.
"""
__docformat__ = 'reStructuredText'
import sys
import docutils
from docutils import languages, Component
from docutils.transforms import universal
class Writer(Component):
"""
Abstract base class for docutils Writers.
Each writer module or package must export a subclass also called 'Writer'.
Each writer must support all standard node types listed in
`docutils.nodes.node_class_names`.
Call `write()` to process a document.
"""
component_type = 'writer'
document = None
"""The document to write."""
language = None
"""Language module for the document."""
destination = None
"""`docutils.io` IO object; where to write the document."""
def __init__(self):
"""Initialize the Writer instance."""
def write(self, document, destination):
self.document = document
self.language = languages.get_language(
document.settings.language_code)
self.destination = destination
self.translate()
output = self.destination.write(self.output)
return output
def translate(self):
"""
Override to do final document tree translation.
This is usually done with a `docutils.nodes.NodeVisitor` subclass, in
combination with a call to `docutils.nodes.Node.walk()` or
`docutils.nodes.Node.walkabout()`. The ``NodeVisitor`` subclass must
support all standard elements (listed in
`docutils.nodes.node_class_names`) and possibly non-standard elements
used by the current Reader as well.
"""
raise NotImplementedError('subclass must override this method')
_writer_aliases = {
'html': 'html4css1',
'pprint': 'pseudoxml',
'pformat': 'pseudoxml',
'pdf': 'rlpdf',
'xml': 'docutils_xml',}
def get_writer_class(writer_name):
"""Return the Writer class from the `writer_name` module."""
writer_name = writer_name.lower()
if _writer_aliases.has_key(writer_name):
writer_name = _writer_aliases[writer_name]
module = __import__(writer_name, globals(), locals())
return module.Writer
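
# Editorial sketch (not part of the commit): looking a writer up by name or
# alias; "pprint" and "pformat" are assumed to resolve to a pseudo-XML
# writer module shipped alongside this package.
from docutils import writers
writer_class = writers.get_writer_class('pprint')
writer = writer_class()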

56
docutils/writers/docutils_xml.py Normal file
View File

@ -0,0 +1,56 @@
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer, writes Docutils XML.
"""
__docformat__ = 'reStructuredText'
import docutils
from docutils import writers
class Writer(writers.Writer):
supported = ('xml',)
"""Formats this writer supports."""
settings_spec = (
'"Docutils XML" Writer Options',
'Warning: these options may adversely affect whitespace; use them '
'only for reading convenience.',
(('Generate XML with newlines before and after tags.',
['--newlines'], {'action': 'store_true'}),
('Generate XML with indents and newlines.',
['--indents'], {'action': 'store_true'}),),)
output = None
"""Final translated form of `document`."""
xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
#xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
doctype = (
'<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/spec/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n'
def translate(self):
settings = self.document.settings
indent = newline = ''
if settings.newlines:
newline = '\n'
if settings.indents:
newline = '\n'
indent = ' '
output_prefix = [self.xml_declaration % settings.output_encoding,
self.doctype,
self.generator % docutils.__version__]
docnode = self.document.asdom().childNodes[0]
self.output = (''.join(output_prefix)
+ docnode.toprettyxml(indent, newline))
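
# Editorial sketch (not part of the commit): the document prolog assembled in
# translate() above, with a sample output encoding filled in.
import docutils
from docutils.writers import docutils_xml

prolog = (docutils_xml.Writer.xml_declaration % 'utf-8'
          + docutils_xml.Writer.doctype
          + docutils_xml.Writer.generator % docutils.__version__)
print prolog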

File diff suppressed because it is too large


@ -0,0 +1,113 @@
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
PEP HTML Writer.
"""
__docformat__ = 'reStructuredText'
import sys
import docutils
from docutils import nodes, optik, utils
from docutils.writers import html4css1
class Writer(html4css1.Writer):
settings_spec = html4css1.Writer.settings_spec + (
'PEP/HTML-Specific Options',
'The HTML --footnote-references option is set to "brackets" by '
'default.',
(('Specify a PEP stylesheet URL, used verbatim. Default is '
'--stylesheet\'s value. If given, --pep-stylesheet overrides '
'--stylesheet.',
['--pep-stylesheet'],
{'metavar': '<URL>'}),
('Specify a PEP stylesheet file, relative to the current working '
'directory. The path is adjusted relative to the output HTML '
'file. Overrides --pep-stylesheet and --stylesheet-path.',
['--pep-stylesheet-path'],
{'metavar': '<path>'}),
('Specify a template file. Default is "pep-html-template".',
['--pep-template'],
{'default': 'pep-html-template', 'metavar': '<file>'}),
('Python\'s home URL. Default is ".." (parent directory).',
['--python-home'],
{'default': '..', 'metavar': '<URL>'}),
('Home URL prefix for PEPs. Default is "." (current directory).',
['--pep-home'],
{'default': '.', 'metavar': '<URL>'}),
# Workaround for SourceForge's broken Python
# (``import random`` causes a segfault).
(optik.SUPPRESS_HELP,
['--no-random'], {'action': 'store_true'}),))
settings_default_overrides = {'footnote_references': 'brackets'}
relative_path_settings = ('pep_stylesheet_path', 'pep_template')
def __init__(self):
html4css1.Writer.__init__(self)
self.translator_class = HTMLTranslator
def translate(self):
html4css1.Writer.translate(self)
settings = self.document.settings
template = open(settings.pep_template).read()
# Substitutions dict for template:
subs = {}
subs['encoding'] = settings.output_encoding
subs['version'] = docutils.__version__
subs['stylesheet'] = ''.join(self.stylesheet)
pyhome = settings.python_home
subs['pyhome'] = pyhome
subs['pephome'] = settings.pep_home
if pyhome == '..':
subs['pepindex'] = '.'
else:
subs['pepindex'] = pyhome + '/peps/'
index = self.document.first_child_matching_class(nodes.field_list)
header = self.document[index]
pepnum = header[0][1].astext()
subs['pep'] = pepnum
if settings.no_random:
subs['banner'] = 0
else:
import random
subs['banner'] = random.randrange(64)
try:
subs['pepnum'] = '%04i' % int(pepnum)
        except ValueError:
subs['pepnum'] = pepnum
subs['title'] = header[1][1].astext()
subs['body'] = ''.join(
self.body_pre_docinfo + self.docinfo + self.body)
subs['body_suffix'] = ''.join(self.body_suffix)
self.output = template % subs
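# Illustrative only (not part of the original module): translate() above
# fills `subs` and applies it to the --pep-template file with Python
# %-style string formatting, so the template is ordinary HTML containing
# %(key)s placeholders for the keys built above ('encoding', 'version',
# 'stylesheet', 'pyhome', 'pephome', 'pepindex', 'pep', 'pepnum', 'title',
# 'banner', 'body', 'body_suffix').  A made-up fragment in that format:
#
#   <head>
#     <meta http-equiv="Content-Type"
#           content="text/html; charset=%(encoding)s">
#     <title>PEP %(pep)s -- %(title)s</title>
#     %(stylesheet)s
#   </head>
#   <body>
#   %(body)s
#   %(body_suffix)s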
class HTMLTranslator(html4css1.HTMLTranslator):
def get_stylesheet_reference(self, relative_to=None):
settings = self.settings
        if relative_to is None:
relative_to = settings._destination
if settings.pep_stylesheet_path:
return utils.relative_path(relative_to,
settings.pep_stylesheet_path)
elif settings.pep_stylesheet:
return settings.pep_stylesheet
        elif settings.stylesheet_path:
            return utils.relative_path(relative_to, settings.stylesheet_path)
else:
return settings.stylesheet
def depart_field_list(self, node):
html4css1.HTMLTranslator.depart_field_list(self, node)
if node.get('class') == 'rfc2822':
self.body.append('<hr />\n')
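# Note (not part of the original module): get_stylesheet_reference() above
# gives the PEP-specific settings precedence over the generic HTML ones --
# pep_stylesheet_path first, then pep_stylesheet, then stylesheet_path
# (made relative to the output file), and finally stylesheet verbatim.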


@ -0,0 +1,30 @@
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision$
# Date: $Date$
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer that writes indented pseudo-XML.
"""
__docformat__ = 'reStructuredText'
from docutils import writers
class Writer(writers.Writer):
supported = ('pprint', 'pformat', 'pseudoxml')
"""Formats this writer supports."""
output = None
"""Final translated form of `document`."""
def translate(self):
self.output = self.document.pformat()
def supports(self, format):
"""This writer supports all format-specific elements."""
return 1
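# Illustrative only (not part of the original module): pformat() renders
# the doctree as indented pseudo-XML, so for a one-paragraph document the
# output looks roughly like the following (exact attributes depend on the
# input source):
#
#   <document source="example.txt">
#       <paragraph>
#           Hello, world.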