chore: automatic commit 2025-04-30 12:48
@@ -0,0 +1,82 @@
"""
    Pygments
    ~~~~~~~~

    Pygments is a syntax highlighting package written in Python.

    It is a generic syntax highlighter for general use in all kinds of software
    such as forum systems, wikis or other applications that need to prettify
    source code. Highlights are:

    * a wide range of common languages and markup formats is supported
    * special attention is paid to details, increasing quality by a fair amount
    * support for new languages and formats is added easily
    * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
      formats that PIL supports, and ANSI sequences
    * it is usable as a command-line tool and as a library
    * ... and it highlights even Brainfuck!

    The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.

    .. _Pygments master branch:
       https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
from io import StringIO, BytesIO

__version__ = '2.19.1'
__docformat__ = 'restructuredtext'

__all__ = ['lex', 'format', 'highlight']


def lex(code, lexer):
    """
    Lex `code` with the `lexer` (must be a `Lexer` instance)
    and return an iterable of tokens. Currently, this only calls
    `lexer.get_tokens()`.
    """
    try:
        return lexer.get_tokens(code)
    except TypeError:
        # Heuristic to catch a common mistake.
        from pip._vendor.pygments.lexer import RegexLexer
        if isinstance(lexer, type) and issubclass(lexer, RegexLexer):
            raise TypeError('lex() argument must be a lexer instance, '
                            'not a class')
        raise


def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
    """
    Format ``tokens`` (an iterable of tokens) with the formatter ``formatter``
    (a `Formatter` instance).

    If ``outfile`` is given and a valid file object (an object with a
    ``write`` method), the result will be written to it, otherwise it
    is returned as a string.
    """
    try:
        if not outfile:
            realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
            formatter.format(tokens, realoutfile)
            return realoutfile.getvalue()
        else:
            formatter.format(tokens, outfile)
    except TypeError:
        # Heuristic to catch a common mistake.
        from pip._vendor.pygments.formatter import Formatter
        if isinstance(formatter, type) and issubclass(formatter, Formatter):
            raise TypeError('format() argument must be a formatter instance, '
                            'not a class')
        raise


def highlight(code, lexer, formatter, outfile=None):
    """
    This is the most high-level highlighting function. It combines `lex` and
    `format` in one function.
    """
    return format(lex(code, lexer), formatter, outfile)
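
# --- Usage sketch (editor's note, not part of the vendored file) ---
# A minimal demonstration of the three functions above, assuming the vendored
# lexers and formatters subpackages are importable alongside this module:
#
#     from pip._vendor.pygments import highlight
#     from pip._vendor.pygments.lexers import PythonLexer
#     from pip._vendor.pygments.formatters import TerminalFormatter
#
#     # With no outfile, format() buffers into StringIO (or BytesIO when the
#     # formatter has an encoding) and returns the result as a string.
#     print(highlight('print("hello")', PythonLexer(), TerminalFormatter()))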
@@ -0,0 +1,17 @@
"""
    pygments.__main__
    ~~~~~~~~~~~~~~~~~

    Main entry point for ``python -m pygments``.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import sys
from pip._vendor.pygments.cmdline import main

try:
    sys.exit(main(sys.argv))
except KeyboardInterrupt:
    sys.exit(1)
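
# --- Usage sketch (editor's note, not part of the vendored file) ---
# Because main() runs at import time, this vendored copy can be invoked as
#
#     python -m pip._vendor.pygments some_file.py
#
# mirroring the standalone ``python -m pygments`` (or ``pygmentize``) CLI;
# the accepted flags are defined in pygments/cmdline.py, not in this module.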
@@ -0,0 +1,70 @@
"""
    pygments.console
    ~~~~~~~~~~~~~~~~

    Format colored console output.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

esc = "\x1b["

codes = {}
codes[""] = ""
codes["reset"] = esc + "39;49;00m"

codes["bold"] = esc + "01m"
codes["faint"] = esc + "02m"
codes["standout"] = esc + "03m"
codes["underline"] = esc + "04m"
codes["blink"] = esc + "05m"
codes["overline"] = esc + "06m"

dark_colors = ["black", "red", "green", "yellow", "blue",
               "magenta", "cyan", "gray"]
light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
                "brightmagenta", "brightcyan", "white"]

x = 30
for dark, light in zip(dark_colors, light_colors):
    codes[dark] = esc + "%im" % x
    codes[light] = esc + "%im" % (60 + x)
    x += 1

del dark, light, x

codes["white"] = codes["bold"]


def reset_color():
    return codes["reset"]


def colorize(color_key, text):
    return codes[color_key] + text + codes["reset"]


def ansiformat(attr, text):
    """
    Format ``text`` with a color and/or some attributes::

        color       normal color
        *color*     bold color
        _color_     underlined color
        +color+     blinking color
    """
    result = []
    if attr[:1] == attr[-1:] == '+':
        result.append(codes['blink'])
        attr = attr[1:-1]
    if attr[:1] == attr[-1:] == '*':
        result.append(codes['bold'])
        attr = attr[1:-1]
    if attr[:1] == attr[-1:] == '_':
        result.append(codes['underline'])
        attr = attr[1:-1]
    result.append(codes[attr])
    result.append(text)
    result.append(codes['reset'])
    return ''.join(result)
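
# --- Usage sketch (editor's note, not part of the vendored file) ---
# The escape-code table above can be exercised directly; the colors only
# render on an ANSI-capable terminal:
#
#     from pip._vendor.pygments.console import ansiformat, colorize
#
#     print(colorize('red', 'error:') + ' something failed')
#     print(ansiformat('*blue*', 'bold blue text'))      # *...* adds "bold"
#     print(ansiformat('_brightgreen_', 'underlined'))   # _..._ adds "underline"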
@@ -0,0 +1,70 @@
"""
    pygments.filter
    ~~~~~~~~~~~~~~~

    Module that implements the default filter.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""


def apply_filters(stream, filters, lexer=None):
    """
    Use this method to apply an iterable of filters to
    a stream. If lexer is given it's forwarded to the
    filter, otherwise the filter receives `None`.
    """
    def _apply(filter_, stream):
        yield from filter_.filter(lexer, stream)
    for filter_ in filters:
        stream = _apply(filter_, stream)
    return stream


def simplefilter(f):
    """
    Decorator that converts a function into a filter::

        @simplefilter
        def lowercase(self, lexer, stream, options):
            for ttype, value in stream:
                yield ttype, value.lower()
    """
    return type(f.__name__, (FunctionFilter,), {
        '__module__': getattr(f, '__module__'),
        '__doc__': f.__doc__,
        'function': f,
    })


class Filter:
    """
    Default filter. Subclass this class or use the `simplefilter`
    decorator to create own filters.
    """

    def __init__(self, **options):
        self.options = options

    def filter(self, lexer, stream):
        raise NotImplementedError()


class FunctionFilter(Filter):
    """
    Abstract class used by `simplefilter` to create simple
    function filters on the fly. The `simplefilter` decorator
    automatically creates subclasses of this class for
    functions passed to it.
    """
    function = None

    def __init__(self, **options):
        if not hasattr(self, 'function'):
            raise TypeError(f'{self.__class__.__name__!r} used without bound function')
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        # pylint: disable=not-callable
        yield from self.function(lexer, stream, self.options)
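
# --- Usage sketch (editor's note, not part of the vendored file) ---
# @simplefilter builds a FunctionFilter subclass from a plain generator
# function, and apply_filters() chains instances lazily. The decorated
# function must take (self, lexer, stream, options), because FunctionFilter
# looks it up as a bound method:
#
#     @simplefilter
#     def uppercase(self, lexer, stream, options):
#         for ttype, value in stream:
#             yield ttype, value.upper()
#
#     tokens = apply_filters(my_lexer.get_tokens(code), [uppercase()])
#
# ``uppercase`` is a class after decoration, so it is instantiated before
# being passed in; ``my_lexer`` and ``code`` are placeholders for this sketch.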
@@ -0,0 +1,940 @@
"""
    pygments.filters
    ~~~~~~~~~~~~~~~~

    Module containing filter lookup functions and default
    filters.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
    string_to_tokentype
from pip._vendor.pygments.filter import Filter
from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
    get_choice_opt, ClassNotFound, OptionError
from pip._vendor.pygments.plugin import find_plugin_filters


def find_filter_class(filtername):
    """Lookup a filter by name. Return None if not found."""
    if filtername in FILTERS:
        return FILTERS[filtername]
    for name, cls in find_plugin_filters():
        if name == filtername:
            return cls
    return None


def get_filter_by_name(filtername, **options):
    """Return an instantiated filter.

    Options are passed to the filter initializer if wanted.
    Raise a ClassNotFound if not found.
    """
    cls = find_filter_class(filtername)
    if cls:
        return cls(**options)
    else:
        raise ClassNotFound(f'filter {filtername!r} not found')


def get_all_filters():
    """Return a generator of all filter names."""
    yield from FILTERS
    for name, _ in find_plugin_filters():
        yield name


def _replace_special(ttype, value, regex, specialttype,
                     replacefunc=lambda x: x):
    last = 0
    for match in regex.finditer(value):
        start, end = match.start(), match.end()
        if start != last:
            yield ttype, value[last:start]
        yield specialttype, replacefunc(value[start:end])
        last = end
    if last != len(value):
        yield ttype, value[last:]


class CodeTagFilter(Filter):
    """Highlight special code tags in comments and docstrings.

    Options accepted:

    `codetags` : list of strings
       A list of strings that are flagged as code tags. The default is to
       highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.

    .. versionchanged:: 2.13
       Now recognizes ``FIXME`` by default.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        tags = get_list_opt(options, 'codetags',
                            ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
        self.tag_re = re.compile(r'\b({})\b'.format('|'.join([
            re.escape(tag) for tag in tags if tag
        ])))

    def filter(self, lexer, stream):
        regex = self.tag_re
        for ttype, value in stream:
            if ttype in String.Doc or \
               ttype in Comment and \
               ttype not in Comment.Preproc:
                yield from _replace_special(ttype, value, regex, Comment.Special)
            else:
                yield ttype, value

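
# --- Usage sketch (editor's note, not part of the vendored file) ---
# CodeTagFilter is normally attached by name through the FILTERS registry at
# the bottom of this module, e.g.:
#
#     lexer.add_filter('codetagify', codetags=['TODO', 'HACK'])
#
# after which TODO/HACK inside comments and docstrings are re-emitted as
# Comment.Special tokens, which styles can color differently.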

class SymbolFilter(Filter):
    """Convert mathematical symbols such as \\<longrightarrow> in Isabelle
    or \\longrightarrow in LaTeX into Unicode characters.

    This is mostly useful for HTML or console output when you want to
    approximate the source rendering you'd see in an IDE.

    Options accepted:

    `lang` : string
       The symbol language. Must be one of ``'isabelle'`` or
       ``'latex'``.  The default is ``'isabelle'``.
    """

    latex_symbols = {
        '\\alpha' : '\U000003b1',
        '\\beta' : '\U000003b2',
        '\\gamma' : '\U000003b3',
        '\\delta' : '\U000003b4',
        '\\varepsilon' : '\U000003b5',
        '\\zeta' : '\U000003b6',
        '\\eta' : '\U000003b7',
        '\\vartheta' : '\U000003b8',
        '\\iota' : '\U000003b9',
        '\\kappa' : '\U000003ba',
        '\\lambda' : '\U000003bb',
        '\\mu' : '\U000003bc',
        '\\nu' : '\U000003bd',
        '\\xi' : '\U000003be',
        '\\pi' : '\U000003c0',
        '\\varrho' : '\U000003c1',
        '\\sigma' : '\U000003c3',
        '\\tau' : '\U000003c4',
        '\\upsilon' : '\U000003c5',
        '\\varphi' : '\U000003c6',
        '\\chi' : '\U000003c7',
        '\\psi' : '\U000003c8',
        '\\omega' : '\U000003c9',
        '\\Gamma' : '\U00000393',
        '\\Delta' : '\U00000394',
        '\\Theta' : '\U00000398',
        '\\Lambda' : '\U0000039b',
        '\\Xi' : '\U0000039e',
        '\\Pi' : '\U000003a0',
        '\\Sigma' : '\U000003a3',
        '\\Upsilon' : '\U000003a5',
        '\\Phi' : '\U000003a6',
        '\\Psi' : '\U000003a8',
        '\\Omega' : '\U000003a9',
        '\\leftarrow' : '\U00002190',
        '\\longleftarrow' : '\U000027f5',
        '\\rightarrow' : '\U00002192',
        '\\longrightarrow' : '\U000027f6',
        '\\Leftarrow' : '\U000021d0',
        '\\Longleftarrow' : '\U000027f8',
        '\\Rightarrow' : '\U000021d2',
        '\\Longrightarrow' : '\U000027f9',
        '\\leftrightarrow' : '\U00002194',
        '\\longleftrightarrow' : '\U000027f7',
        '\\Leftrightarrow' : '\U000021d4',
        '\\Longleftrightarrow' : '\U000027fa',
        '\\mapsto' : '\U000021a6',
        '\\longmapsto' : '\U000027fc',
        '\\relbar' : '\U00002500',
        '\\Relbar' : '\U00002550',
        '\\hookleftarrow' : '\U000021a9',
        '\\hookrightarrow' : '\U000021aa',
        '\\leftharpoondown' : '\U000021bd',
        '\\rightharpoondown' : '\U000021c1',
        '\\leftharpoonup' : '\U000021bc',
        '\\rightharpoonup' : '\U000021c0',
        '\\rightleftharpoons' : '\U000021cc',
        '\\leadsto' : '\U0000219d',
        '\\downharpoonleft' : '\U000021c3',
        '\\downharpoonright' : '\U000021c2',
        '\\upharpoonleft' : '\U000021bf',
        '\\upharpoonright' : '\U000021be',
        '\\restriction' : '\U000021be',
        '\\uparrow' : '\U00002191',
        '\\Uparrow' : '\U000021d1',
        '\\downarrow' : '\U00002193',
        '\\Downarrow' : '\U000021d3',
        '\\updownarrow' : '\U00002195',
        '\\Updownarrow' : '\U000021d5',
        '\\langle' : '\U000027e8',
        '\\rangle' : '\U000027e9',
        '\\lceil' : '\U00002308',
        '\\rceil' : '\U00002309',
        '\\lfloor' : '\U0000230a',
        '\\rfloor' : '\U0000230b',
        '\\flqq' : '\U000000ab',
        '\\frqq' : '\U000000bb',
        '\\bot' : '\U000022a5',
        '\\top' : '\U000022a4',
        '\\wedge' : '\U00002227',
        '\\bigwedge' : '\U000022c0',
        '\\vee' : '\U00002228',
        '\\bigvee' : '\U000022c1',
        '\\forall' : '\U00002200',
        '\\exists' : '\U00002203',
        '\\nexists' : '\U00002204',
        '\\neg' : '\U000000ac',
        '\\Box' : '\U000025a1',
        '\\Diamond' : '\U000025c7',
        '\\vdash' : '\U000022a2',
        '\\models' : '\U000022a8',
        '\\dashv' : '\U000022a3',
        '\\surd' : '\U0000221a',
        '\\le' : '\U00002264',
        '\\ge' : '\U00002265',
        '\\ll' : '\U0000226a',
        '\\gg' : '\U0000226b',
        '\\lesssim' : '\U00002272',
        '\\gtrsim' : '\U00002273',
        '\\lessapprox' : '\U00002a85',
        '\\gtrapprox' : '\U00002a86',
        '\\in' : '\U00002208',
        '\\notin' : '\U00002209',
        '\\subset' : '\U00002282',
        '\\supset' : '\U00002283',
        '\\subseteq' : '\U00002286',
        '\\supseteq' : '\U00002287',
        '\\sqsubset' : '\U0000228f',
        '\\sqsupset' : '\U00002290',
        '\\sqsubseteq' : '\U00002291',
        '\\sqsupseteq' : '\U00002292',
        '\\cap' : '\U00002229',
        '\\bigcap' : '\U000022c2',
        '\\cup' : '\U0000222a',
        '\\bigcup' : '\U000022c3',
        '\\sqcup' : '\U00002294',
        '\\bigsqcup' : '\U00002a06',
        '\\sqcap' : '\U00002293',
        '\\Bigsqcap' : '\U00002a05',
        '\\setminus' : '\U00002216',
        '\\propto' : '\U0000221d',
        '\\uplus' : '\U0000228e',
        '\\bigplus' : '\U00002a04',
        '\\sim' : '\U0000223c',
        '\\doteq' : '\U00002250',
        '\\simeq' : '\U00002243',
        '\\approx' : '\U00002248',
        '\\asymp' : '\U0000224d',
        '\\cong' : '\U00002245',
        '\\equiv' : '\U00002261',
        '\\Join' : '\U000022c8',
        '\\bowtie' : '\U00002a1d',
        '\\prec' : '\U0000227a',
        '\\succ' : '\U0000227b',
        '\\preceq' : '\U0000227c',
        '\\succeq' : '\U0000227d',
        '\\parallel' : '\U00002225',
        '\\mid' : '\U000000a6',
        '\\pm' : '\U000000b1',
        '\\mp' : '\U00002213',
        '\\times' : '\U000000d7',
        '\\div' : '\U000000f7',
        '\\cdot' : '\U000022c5',
        '\\star' : '\U000022c6',
        '\\circ' : '\U00002218',
        '\\dagger' : '\U00002020',
        '\\ddagger' : '\U00002021',
        '\\lhd' : '\U000022b2',
        '\\rhd' : '\U000022b3',
        '\\unlhd' : '\U000022b4',
        '\\unrhd' : '\U000022b5',
        '\\triangleleft' : '\U000025c3',
        '\\triangleright' : '\U000025b9',
        '\\triangle' : '\U000025b3',
        '\\triangleq' : '\U0000225c',
        '\\oplus' : '\U00002295',
        '\\bigoplus' : '\U00002a01',
        '\\otimes' : '\U00002297',
        '\\bigotimes' : '\U00002a02',
        '\\odot' : '\U00002299',
        '\\bigodot' : '\U00002a00',
        '\\ominus' : '\U00002296',
        '\\oslash' : '\U00002298',
        '\\dots' : '\U00002026',
        '\\cdots' : '\U000022ef',
        '\\sum' : '\U00002211',
        '\\prod' : '\U0000220f',
        '\\coprod' : '\U00002210',
        '\\infty' : '\U0000221e',
        '\\int' : '\U0000222b',
        '\\oint' : '\U0000222e',
        '\\clubsuit' : '\U00002663',
        '\\diamondsuit' : '\U00002662',
        '\\heartsuit' : '\U00002661',
        '\\spadesuit' : '\U00002660',
        '\\aleph' : '\U00002135',
        '\\emptyset' : '\U00002205',
        '\\nabla' : '\U00002207',
        '\\partial' : '\U00002202',
        '\\flat' : '\U0000266d',
        '\\natural' : '\U0000266e',
        '\\sharp' : '\U0000266f',
        '\\angle' : '\U00002220',
        '\\copyright' : '\U000000a9',
        '\\textregistered' : '\U000000ae',
        '\\textonequarter' : '\U000000bc',
        '\\textonehalf' : '\U000000bd',
        '\\textthreequarters' : '\U000000be',
        '\\textordfeminine' : '\U000000aa',
        '\\textordmasculine' : '\U000000ba',
        '\\euro' : '\U000020ac',
        '\\pounds' : '\U000000a3',
        '\\yen' : '\U000000a5',
        '\\textcent' : '\U000000a2',
        '\\textcurrency' : '\U000000a4',
        '\\textdegree' : '\U000000b0',
    }

    isabelle_symbols = {
        '\\<zero>' : '\U0001d7ec',
        '\\<one>' : '\U0001d7ed',
        '\\<two>' : '\U0001d7ee',
        '\\<three>' : '\U0001d7ef',
        '\\<four>' : '\U0001d7f0',
        '\\<five>' : '\U0001d7f1',
        '\\<six>' : '\U0001d7f2',
        '\\<seven>' : '\U0001d7f3',
        '\\<eight>' : '\U0001d7f4',
        '\\<nine>' : '\U0001d7f5',
        '\\<A>' : '\U0001d49c',
        '\\<B>' : '\U0000212c',
        '\\<C>' : '\U0001d49e',
        '\\<D>' : '\U0001d49f',
        '\\<E>' : '\U00002130',
        '\\<F>' : '\U00002131',
        '\\<G>' : '\U0001d4a2',
        '\\<H>' : '\U0000210b',
        '\\<I>' : '\U00002110',
        '\\<J>' : '\U0001d4a5',
        '\\<K>' : '\U0001d4a6',
        '\\<L>' : '\U00002112',
        '\\<M>' : '\U00002133',
        '\\<N>' : '\U0001d4a9',
        '\\<O>' : '\U0001d4aa',
        '\\<P>' : '\U0001d4ab',
        '\\<Q>' : '\U0001d4ac',
        '\\<R>' : '\U0000211b',
        '\\<S>' : '\U0001d4ae',
        '\\<T>' : '\U0001d4af',
        '\\<U>' : '\U0001d4b0',
        '\\<V>' : '\U0001d4b1',
        '\\<W>' : '\U0001d4b2',
        '\\<X>' : '\U0001d4b3',
        '\\<Y>' : '\U0001d4b4',
        '\\<Z>' : '\U0001d4b5',
        '\\<a>' : '\U0001d5ba',
        '\\<b>' : '\U0001d5bb',
        '\\<c>' : '\U0001d5bc',
        '\\<d>' : '\U0001d5bd',
        '\\<e>' : '\U0001d5be',
        '\\<f>' : '\U0001d5bf',
        '\\<g>' : '\U0001d5c0',
        '\\<h>' : '\U0001d5c1',
        '\\<i>' : '\U0001d5c2',
        '\\<j>' : '\U0001d5c3',
        '\\<k>' : '\U0001d5c4',
        '\\<l>' : '\U0001d5c5',
        '\\<m>' : '\U0001d5c6',
        '\\<n>' : '\U0001d5c7',
        '\\<o>' : '\U0001d5c8',
        '\\<p>' : '\U0001d5c9',
        '\\<q>' : '\U0001d5ca',
        '\\<r>' : '\U0001d5cb',
        '\\<s>' : '\U0001d5cc',
        '\\<t>' : '\U0001d5cd',
        '\\<u>' : '\U0001d5ce',
        '\\<v>' : '\U0001d5cf',
        '\\<w>' : '\U0001d5d0',
        '\\<x>' : '\U0001d5d1',
        '\\<y>' : '\U0001d5d2',
        '\\<z>' : '\U0001d5d3',
        '\\<AA>' : '\U0001d504',
        '\\<BB>' : '\U0001d505',
        '\\<CC>' : '\U0000212d',
        '\\<DD>' : '\U0001d507',
        '\\<EE>' : '\U0001d508',
        '\\<FF>' : '\U0001d509',
        '\\<GG>' : '\U0001d50a',
        '\\<HH>' : '\U0000210c',
        '\\<II>' : '\U00002111',
        '\\<JJ>' : '\U0001d50d',
        '\\<KK>' : '\U0001d50e',
        '\\<LL>' : '\U0001d50f',
        '\\<MM>' : '\U0001d510',
        '\\<NN>' : '\U0001d511',
        '\\<OO>' : '\U0001d512',
        '\\<PP>' : '\U0001d513',
        '\\<QQ>' : '\U0001d514',
        '\\<RR>' : '\U0000211c',
        '\\<SS>' : '\U0001d516',
        '\\<TT>' : '\U0001d517',
        '\\<UU>' : '\U0001d518',
        '\\<VV>' : '\U0001d519',
        '\\<WW>' : '\U0001d51a',
        '\\<XX>' : '\U0001d51b',
        '\\<YY>' : '\U0001d51c',
        '\\<ZZ>' : '\U00002128',
        '\\<aa>' : '\U0001d51e',
        '\\<bb>' : '\U0001d51f',
        '\\<cc>' : '\U0001d520',
        '\\<dd>' : '\U0001d521',
        '\\<ee>' : '\U0001d522',
        '\\<ff>' : '\U0001d523',
        '\\<gg>' : '\U0001d524',
        '\\<hh>' : '\U0001d525',
        '\\<ii>' : '\U0001d526',
        '\\<jj>' : '\U0001d527',
        '\\<kk>' : '\U0001d528',
        '\\<ll>' : '\U0001d529',
        '\\<mm>' : '\U0001d52a',
        '\\<nn>' : '\U0001d52b',
        '\\<oo>' : '\U0001d52c',
        '\\<pp>' : '\U0001d52d',
        '\\<qq>' : '\U0001d52e',
        '\\<rr>' : '\U0001d52f',
        '\\<ss>' : '\U0001d530',
        '\\<tt>' : '\U0001d531',
        '\\<uu>' : '\U0001d532',
        '\\<vv>' : '\U0001d533',
        '\\<ww>' : '\U0001d534',
        '\\<xx>' : '\U0001d535',
        '\\<yy>' : '\U0001d536',
        '\\<zz>' : '\U0001d537',
        '\\<alpha>' : '\U000003b1',
        '\\<beta>' : '\U000003b2',
        '\\<gamma>' : '\U000003b3',
        '\\<delta>' : '\U000003b4',
        '\\<epsilon>' : '\U000003b5',
        '\\<zeta>' : '\U000003b6',
        '\\<eta>' : '\U000003b7',
        '\\<theta>' : '\U000003b8',
        '\\<iota>' : '\U000003b9',
        '\\<kappa>' : '\U000003ba',
        '\\<lambda>' : '\U000003bb',
        '\\<mu>' : '\U000003bc',
        '\\<nu>' : '\U000003bd',
        '\\<xi>' : '\U000003be',
        '\\<pi>' : '\U000003c0',
        '\\<rho>' : '\U000003c1',
        '\\<sigma>' : '\U000003c3',
        '\\<tau>' : '\U000003c4',
        '\\<upsilon>' : '\U000003c5',
        '\\<phi>' : '\U000003c6',
        '\\<chi>' : '\U000003c7',
        '\\<psi>' : '\U000003c8',
        '\\<omega>' : '\U000003c9',
        '\\<Gamma>' : '\U00000393',
        '\\<Delta>' : '\U00000394',
        '\\<Theta>' : '\U00000398',
        '\\<Lambda>' : '\U0000039b',
        '\\<Xi>' : '\U0000039e',
        '\\<Pi>' : '\U000003a0',
        '\\<Sigma>' : '\U000003a3',
        '\\<Upsilon>' : '\U000003a5',
        '\\<Phi>' : '\U000003a6',
        '\\<Psi>' : '\U000003a8',
        '\\<Omega>' : '\U000003a9',
        '\\<bool>' : '\U0001d539',
        '\\<complex>' : '\U00002102',
        '\\<nat>' : '\U00002115',
        '\\<rat>' : '\U0000211a',
        '\\<real>' : '\U0000211d',
        '\\<int>' : '\U00002124',
        '\\<leftarrow>' : '\U00002190',
        '\\<longleftarrow>' : '\U000027f5',
        '\\<rightarrow>' : '\U00002192',
        '\\<longrightarrow>' : '\U000027f6',
        '\\<Leftarrow>' : '\U000021d0',
        '\\<Longleftarrow>' : '\U000027f8',
        '\\<Rightarrow>' : '\U000021d2',
        '\\<Longrightarrow>' : '\U000027f9',
        '\\<leftrightarrow>' : '\U00002194',
        '\\<longleftrightarrow>' : '\U000027f7',
        '\\<Leftrightarrow>' : '\U000021d4',
        '\\<Longleftrightarrow>' : '\U000027fa',
        '\\<mapsto>' : '\U000021a6',
        '\\<longmapsto>' : '\U000027fc',
        '\\<midarrow>' : '\U00002500',
        '\\<Midarrow>' : '\U00002550',
        '\\<hookleftarrow>' : '\U000021a9',
        '\\<hookrightarrow>' : '\U000021aa',
        '\\<leftharpoondown>' : '\U000021bd',
        '\\<rightharpoondown>' : '\U000021c1',
        '\\<leftharpoonup>' : '\U000021bc',
        '\\<rightharpoonup>' : '\U000021c0',
        '\\<rightleftharpoons>' : '\U000021cc',
        '\\<leadsto>' : '\U0000219d',
        '\\<downharpoonleft>' : '\U000021c3',
        '\\<downharpoonright>' : '\U000021c2',
        '\\<upharpoonleft>' : '\U000021bf',
        '\\<upharpoonright>' : '\U000021be',
        '\\<restriction>' : '\U000021be',
        '\\<Colon>' : '\U00002237',
        '\\<up>' : '\U00002191',
        '\\<Up>' : '\U000021d1',
        '\\<down>' : '\U00002193',
        '\\<Down>' : '\U000021d3',
        '\\<updown>' : '\U00002195',
        '\\<Updown>' : '\U000021d5',
        '\\<langle>' : '\U000027e8',
        '\\<rangle>' : '\U000027e9',
        '\\<lceil>' : '\U00002308',
        '\\<rceil>' : '\U00002309',
        '\\<lfloor>' : '\U0000230a',
        '\\<rfloor>' : '\U0000230b',
        '\\<lparr>' : '\U00002987',
        '\\<rparr>' : '\U00002988',
        '\\<lbrakk>' : '\U000027e6',
        '\\<rbrakk>' : '\U000027e7',
        '\\<lbrace>' : '\U00002983',
        '\\<rbrace>' : '\U00002984',
        '\\<guillemotleft>' : '\U000000ab',
        '\\<guillemotright>' : '\U000000bb',
        '\\<bottom>' : '\U000022a5',
        '\\<top>' : '\U000022a4',
        '\\<and>' : '\U00002227',
        '\\<And>' : '\U000022c0',
        '\\<or>' : '\U00002228',
        '\\<Or>' : '\U000022c1',
        '\\<forall>' : '\U00002200',
        '\\<exists>' : '\U00002203',
        '\\<nexists>' : '\U00002204',
        '\\<not>' : '\U000000ac',
        '\\<box>' : '\U000025a1',
        '\\<diamond>' : '\U000025c7',
        '\\<turnstile>' : '\U000022a2',
        '\\<Turnstile>' : '\U000022a8',
        '\\<tturnstile>' : '\U000022a9',
        '\\<TTurnstile>' : '\U000022ab',
        '\\<stileturn>' : '\U000022a3',
        '\\<surd>' : '\U0000221a',
        '\\<le>' : '\U00002264',
        '\\<ge>' : '\U00002265',
        '\\<lless>' : '\U0000226a',
        '\\<ggreater>' : '\U0000226b',
        '\\<lesssim>' : '\U00002272',
        '\\<greatersim>' : '\U00002273',
        '\\<lessapprox>' : '\U00002a85',
        '\\<greaterapprox>' : '\U00002a86',
        '\\<in>' : '\U00002208',
        '\\<notin>' : '\U00002209',
        '\\<subset>' : '\U00002282',
        '\\<supset>' : '\U00002283',
        '\\<subseteq>' : '\U00002286',
        '\\<supseteq>' : '\U00002287',
        '\\<sqsubset>' : '\U0000228f',
        '\\<sqsupset>' : '\U00002290',
        '\\<sqsubseteq>' : '\U00002291',
        '\\<sqsupseteq>' : '\U00002292',
        '\\<inter>' : '\U00002229',
        '\\<Inter>' : '\U000022c2',
        '\\<union>' : '\U0000222a',
        '\\<Union>' : '\U000022c3',
        '\\<squnion>' : '\U00002294',
        '\\<Squnion>' : '\U00002a06',
        '\\<sqinter>' : '\U00002293',
        '\\<Sqinter>' : '\U00002a05',
        '\\<setminus>' : '\U00002216',
        '\\<propto>' : '\U0000221d',
        '\\<uplus>' : '\U0000228e',
        '\\<Uplus>' : '\U00002a04',
        '\\<noteq>' : '\U00002260',
        '\\<sim>' : '\U0000223c',
        '\\<doteq>' : '\U00002250',
        '\\<simeq>' : '\U00002243',
        '\\<approx>' : '\U00002248',
        '\\<asymp>' : '\U0000224d',
        '\\<cong>' : '\U00002245',
        '\\<smile>' : '\U00002323',
        '\\<equiv>' : '\U00002261',
        '\\<frown>' : '\U00002322',
        '\\<Join>' : '\U000022c8',
        '\\<bowtie>' : '\U00002a1d',
        '\\<prec>' : '\U0000227a',
        '\\<succ>' : '\U0000227b',
        '\\<preceq>' : '\U0000227c',
        '\\<succeq>' : '\U0000227d',
        '\\<parallel>' : '\U00002225',
        '\\<bar>' : '\U000000a6',
        '\\<plusminus>' : '\U000000b1',
        '\\<minusplus>' : '\U00002213',
        '\\<times>' : '\U000000d7',
        '\\<div>' : '\U000000f7',
        '\\<cdot>' : '\U000022c5',
        '\\<star>' : '\U000022c6',
        '\\<bullet>' : '\U00002219',
        '\\<circ>' : '\U00002218',
        '\\<dagger>' : '\U00002020',
        '\\<ddagger>' : '\U00002021',
        '\\<lhd>' : '\U000022b2',
        '\\<rhd>' : '\U000022b3',
        '\\<unlhd>' : '\U000022b4',
        '\\<unrhd>' : '\U000022b5',
        '\\<triangleleft>' : '\U000025c3',
        '\\<triangleright>' : '\U000025b9',
        '\\<triangle>' : '\U000025b3',
        '\\<triangleq>' : '\U0000225c',
        '\\<oplus>' : '\U00002295',
        '\\<Oplus>' : '\U00002a01',
        '\\<otimes>' : '\U00002297',
        '\\<Otimes>' : '\U00002a02',
        '\\<odot>' : '\U00002299',
        '\\<Odot>' : '\U00002a00',
        '\\<ominus>' : '\U00002296',
        '\\<oslash>' : '\U00002298',
        '\\<dots>' : '\U00002026',
        '\\<cdots>' : '\U000022ef',
        '\\<Sum>' : '\U00002211',
        '\\<Prod>' : '\U0000220f',
        '\\<Coprod>' : '\U00002210',
        '\\<infinity>' : '\U0000221e',
        '\\<integral>' : '\U0000222b',
        '\\<ointegral>' : '\U0000222e',
        '\\<clubsuit>' : '\U00002663',
        '\\<diamondsuit>' : '\U00002662',
        '\\<heartsuit>' : '\U00002661',
        '\\<spadesuit>' : '\U00002660',
        '\\<aleph>' : '\U00002135',
        '\\<emptyset>' : '\U00002205',
        '\\<nabla>' : '\U00002207',
        '\\<partial>' : '\U00002202',
        '\\<flat>' : '\U0000266d',
        '\\<natural>' : '\U0000266e',
        '\\<sharp>' : '\U0000266f',
        '\\<angle>' : '\U00002220',
        '\\<copyright>' : '\U000000a9',
        '\\<registered>' : '\U000000ae',
        '\\<hyphen>' : '\U000000ad',
        '\\<inverse>' : '\U000000af',
        '\\<onequarter>' : '\U000000bc',
        '\\<onehalf>' : '\U000000bd',
        '\\<threequarters>' : '\U000000be',
        '\\<ordfeminine>' : '\U000000aa',
        '\\<ordmasculine>' : '\U000000ba',
        '\\<section>' : '\U000000a7',
        '\\<paragraph>' : '\U000000b6',
        '\\<exclamdown>' : '\U000000a1',
        '\\<questiondown>' : '\U000000bf',
        '\\<euro>' : '\U000020ac',
        '\\<pounds>' : '\U000000a3',
        '\\<yen>' : '\U000000a5',
        '\\<cent>' : '\U000000a2',
        '\\<currency>' : '\U000000a4',
        '\\<degree>' : '\U000000b0',
        '\\<amalg>' : '\U00002a3f',
        '\\<mho>' : '\U00002127',
        '\\<lozenge>' : '\U000025ca',
        '\\<wp>' : '\U00002118',
        '\\<wrong>' : '\U00002240',
        '\\<struct>' : '\U000022c4',
        '\\<acute>' : '\U000000b4',
        '\\<index>' : '\U00000131',
        '\\<dieresis>' : '\U000000a8',
        '\\<cedilla>' : '\U000000b8',
        '\\<hungarumlaut>' : '\U000002dd',
        '\\<some>' : '\U000003f5',
        '\\<newline>' : '\U000023ce',
        '\\<open>' : '\U00002039',
        '\\<close>' : '\U0000203a',
        '\\<here>' : '\U00002302',
        '\\<^sub>' : '\U000021e9',
        '\\<^sup>' : '\U000021e7',
        '\\<^bold>' : '\U00002759',
        '\\<^bsub>' : '\U000021d8',
        '\\<^esub>' : '\U000021d9',
        '\\<^bsup>' : '\U000021d7',
        '\\<^esup>' : '\U000021d6',
    }

    lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}

    def __init__(self, **options):
        Filter.__init__(self, **options)
        lang = get_choice_opt(options, 'lang',
                              ['isabelle', 'latex'], 'isabelle')
        self.symbols = self.lang_map[lang]

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if value in self.symbols:
                yield ttype, self.symbols[value]
            else:
                yield ttype, value

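
# --- Usage sketch (editor's note, not part of the vendored file) ---
# SymbolFilter only rewrites token values that match a mapping key exactly,
# so it relies on the Isabelle/LaTeX lexers emitting each symbol escape as
# its own token. For example:
#
#     lexer.add_filter('symbols', lang='latex')
#     # a token whose value is '\\longrightarrow' is then re-emitted
#     # with the value '\U000027f6' (the long rightwards arrow)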


class KeywordCaseFilter(Filter):
    """Convert keywords to lowercase or uppercase or capitalize them, which
    means first letter uppercase, rest lowercase.

    This can be useful e.g. if you highlight Pascal code and want to adapt the
    code to your styleguide.

    Options accepted:

    `case` : string
       The casing to convert keywords to. Must be one of ``'lower'``,
       ``'upper'`` or ``'capitalize'``.  The default is ``'lower'``.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        case = get_choice_opt(options, 'case',
                              ['lower', 'upper', 'capitalize'], 'lower')
        self.convert = getattr(str, case)

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype in Keyword:
                yield ttype, self.convert(value)
            else:
                yield ttype, value


class NameHighlightFilter(Filter):
    """Highlight a normal Name (and Name.*) token with a different token type.

    Example::

        filter = NameHighlightFilter(
            names=['foo', 'bar', 'baz'],
            tokentype=Name.Function,
        )

    This would highlight the names "foo", "bar" and "baz"
    as functions. `Name.Function` is the default token type.

    Options accepted:

    `names` : list of strings
      A list of names that should be given the different token type.
      There is no default.
    `tokentype` : TokenType or string
      A token type or a string containing a token type name that is
      used for highlighting the strings in `names`. The default is
      `Name.Function`.
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.names = set(get_list_opt(options, 'names', []))
        tokentype = options.get('tokentype')
        if tokentype:
            self.tokentype = string_to_tokentype(tokentype)
        else:
            self.tokentype = Name.Function

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype in Name and value in self.names:
                yield self.tokentype, value
            else:
                yield ttype, value


class ErrorToken(Exception):
    pass


class RaiseOnErrorTokenFilter(Filter):
    """Raise an exception when the lexer generates an error token.

    Options accepted:

    `excclass` : Exception class
       The exception class to raise.
       The default is `pygments.filters.ErrorToken`.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.exception = options.get('excclass', ErrorToken)
        try:
            # issubclass() will raise TypeError if first argument is not a class
            if not issubclass(self.exception, Exception):
                raise TypeError
        except TypeError:
            raise OptionError('excclass option is not an exception class')

    def filter(self, lexer, stream):
        for ttype, value in stream:
            if ttype is Error:
                raise self.exception(value)
            yield ttype, value


class VisibleWhitespaceFilter(Filter):
    """Convert tabs, newlines and/or spaces to visible characters.

    Options accepted:

    `spaces` : string or bool
      If this is a one-character string, spaces will be replaced by this string.
      If it is another true value, spaces will be replaced by ``·`` (unicode
      MIDDLE DOT). If it is a false value, spaces will not be replaced. The
      default is ``False``.
    `tabs` : string or bool
      The same as for `spaces`, but the default replacement character is ``»``
      (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
      is ``False``. Note: this will not work if the `tabsize` option for the
      lexer is nonzero, as tabs will already have been expanded then.
    `tabsize` : int
      If tabs are to be replaced by this filter (see the `tabs` option), this
      is the total number of characters that a tab should be expanded to.
      The default is ``8``.
    `newlines` : string or bool
      The same as for `spaces`, but the default replacement character is ``¶``
      (unicode PILCROW SIGN). The default value is ``False``.
    `wstokentype` : bool
      If true, give whitespace the special `Whitespace` token type. This allows
      styling the visible whitespace differently (e.g. greyed out), but it can
      disrupt background colors. The default is ``True``.

    .. versionadded:: 0.8
    """

    def __init__(self, **options):
        Filter.__init__(self, **options)
        for name, default in [('spaces',   '·'),
                              ('tabs',     '»'),
                              ('newlines', '¶')]:
            opt = options.get(name, False)
            if isinstance(opt, str) and len(opt) == 1:
                setattr(self, name, opt)
            else:
                setattr(self, name, (opt and default or ''))
        tabsize = get_int_opt(options, 'tabsize', 8)
        if self.tabs:
            self.tabs += ' ' * (tabsize - 1)
        if self.newlines:
            self.newlines += '\n'
        self.wstt = get_bool_opt(options, 'wstokentype', True)

    def filter(self, lexer, stream):
        if self.wstt:
            spaces = self.spaces or ' '
            tabs = self.tabs or '\t'
            newlines = self.newlines or '\n'
            regex = re.compile(r'\s')

            def replacefunc(wschar):
                if wschar == ' ':
                    return spaces
                elif wschar == '\t':
                    return tabs
                elif wschar == '\n':
                    return newlines
                return wschar

            for ttype, value in stream:
                yield from _replace_special(ttype, value, regex, Whitespace,
                                            replacefunc)
        else:
            spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
            # simpler processing
            for ttype, value in stream:
                if spaces:
                    value = value.replace(' ', spaces)
                if tabs:
                    value = value.replace('\t', tabs)
                if newlines:
                    value = value.replace('\n', newlines)
                yield ttype, value


class GobbleFilter(Filter):
    """Gobbles source code lines (eats initial characters).

    This filter drops the first ``n`` characters off every line of code. This
    may be useful when the source code fed to the lexer is indented by a fixed
    amount of space that isn't desired in the output.

    Options accepted:

    `n` : int
       The number of characters to gobble.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)
        self.n = get_int_opt(options, 'n', 0)

    def gobble(self, value, left):
        if left < len(value):
            return value[left:], 0
        else:
            return '', left - len(value)

    def filter(self, lexer, stream):
        n = self.n
        left = n  # How many characters left to gobble.
        for ttype, value in stream:
            # Remove ``left`` tokens from first line, ``n`` from all others.
            parts = value.split('\n')
            (parts[0], left) = self.gobble(parts[0], left)
            for i in range(1, len(parts)):
                (parts[i], left) = self.gobble(parts[i], n)
            value = '\n'.join(parts)

            if value != '':
                yield ttype, value


class TokenMergeFilter(Filter):
    """Merges consecutive tokens with the same token type in the output
    stream of a lexer.

    .. versionadded:: 1.2
    """
    def __init__(self, **options):
        Filter.__init__(self, **options)

    def filter(self, lexer, stream):
        current_type = None
        current_value = None
        for ttype, value in stream:
            if ttype is current_type:
                current_value += value
            else:
                if current_type is not None:
                    yield current_type, current_value
                current_type = ttype
                current_value = value
        if current_type is not None:
            yield current_type, current_value


FILTERS = {
    'codetagify': CodeTagFilter,
    'keywordcase': KeywordCaseFilter,
    'highlight': NameHighlightFilter,
    'raiseonerror': RaiseOnErrorTokenFilter,
    'whitespace': VisibleWhitespaceFilter,
    'gobble': GobbleFilter,
    'tokenmerge': TokenMergeFilter,
    'symbols': SymbolFilter,
}
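
# --- Usage sketch (editor's note, not part of the vendored file) ---
# Filters from the FILTERS registry above are usually attached to a lexer by
# name; add_filter() on the Lexer base class (see lexer.py below) resolves
# string names through get_filter_by_name() in this module:
#
#     from pip._vendor.pygments.lexers import PythonLexer  # assumes vendored lexers
#
#     lexer = PythonLexer()
#     lexer.add_filter('keywordcase', case='upper')   # KeywordCaseFilter
#     lexer.add_filter('tokenmerge')                  # TokenMergeFilter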
@@ -0,0 +1,129 @@
"""
    pygments.formatter
    ~~~~~~~~~~~~~~~~~~

    Base formatter class.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import codecs

from pip._vendor.pygments.util import get_bool_opt
from pip._vendor.pygments.styles import get_style_by_name

__all__ = ['Formatter']


def _lookup_style(style):
    if isinstance(style, str):
        return get_style_by_name(style)
    return style


class Formatter:
    """
    Converts a token stream to text.

    Formatters should have attributes to help selecting them. These
    are similar to the corresponding :class:`~pygments.lexer.Lexer`
    attributes.

    .. autoattribute:: name
       :no-value:

    .. autoattribute:: aliases
       :no-value:

    .. autoattribute:: filenames
       :no-value:

    You can pass options as keyword arguments to the constructor.
    All formatters accept these basic options:

    ``style``
        The style to use, can be a string or a Style subclass
        (default: "default"). Not used by e.g. the
        TerminalFormatter.
    ``full``
        Tells the formatter to output a "full" document, i.e.
        a complete self-contained document. This doesn't have
        any effect for some formatters (default: false).
    ``title``
        If ``full`` is true, the title that should be used to
        caption the document (default: '').
    ``encoding``
        If given, must be an encoding name. This will be used to
        convert the Unicode token strings to byte strings in the
        output. If it is "" or None, Unicode strings will be written
        to the output file, which most file-like objects do not
        support (default: None).
    ``outencoding``
        Overrides ``encoding`` if given.

    """

    #: Full name for the formatter, in human-readable form.
    name = None

    #: A list of short, unique identifiers that can be used to lookup
    #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`.
    aliases = []

    #: A list of fnmatch patterns that match filenames for which this
    #: formatter can produce output. The patterns in this list should be unique
    #: among all formatters.
    filenames = []

    #: If True, this formatter outputs Unicode strings when no encoding
    #: option is given.
    unicodeoutput = True

    def __init__(self, **options):
        """
        As with lexers, this constructor takes arbitrary optional arguments,
        and if you override it, you should first process your own options, then
        call the base class implementation.
        """
        self.style = _lookup_style(options.get('style', 'default'))
        self.full = get_bool_opt(options, 'full', False)
        self.title = options.get('title', '')
        self.encoding = options.get('encoding', None) or None
        if self.encoding in ('guess', 'chardet'):
            # can happen for e.g. pygmentize -O encoding=guess
            self.encoding = 'utf-8'
        self.encoding = options.get('outencoding') or self.encoding
        self.options = options

    def get_style_defs(self, arg=''):
        """
        This method must return statements or declarations suitable to define
        the current style for subsequent highlighted text (e.g. CSS classes
        in the `HTMLFormatter`).

        The optional argument `arg` can be used to modify the generation and
        is formatter dependent (it is standardized because it can be given on
        the command line).

        This method is called by the ``-S`` :doc:`command-line option <cmdline>`,
        the `arg` is then given by the ``-a`` option.
        """
        return ''

    def format(self, tokensource, outfile):
        """
        This method must format the tokens from the `tokensource` iterable and
        write the formatted version to the file object `outfile`.

        Formatter options can control how exactly the tokens are converted.
        """
        if self.encoding:
            # wrap the outfile in a StreamWriter
            outfile = codecs.lookup(self.encoding)[3](outfile)
        return self.format_unencoded(tokensource, outfile)

    # Allow writing Formatter[str] or Formatter[bytes]. That's equivalent to
    # Formatter. This helps when using third-party type stubs from typeshed.
    def __class_getitem__(cls, name):
        return cls
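
# --- Usage sketch (editor's note, not part of the vendored file) ---
# A minimal Formatter subclass only needs format_unencoded(); format() above
# already handles the optional encoding wrapper. This hypothetical example
# writes the token text with no markup at all (essentially what the vendored
# NullFormatter does):
#
#     class PlainFormatter(Formatter):
#         name = 'Plain (demo)'
#         aliases = ['plain-demo']   # hypothetical alias
#
#         def format_unencoded(self, tokensource, outfile):
#             for ttype, value in tokensource:
#                 outfile.write(value)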
@@ -0,0 +1,157 @@
"""
    pygments.formatters
    ~~~~~~~~~~~~~~~~~~~

    Pygments formatters.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
import sys
import types
import fnmatch
from os.path import basename

from pip._vendor.pygments.formatters._mapping import FORMATTERS
from pip._vendor.pygments.plugin import find_plugin_formatters
from pip._vendor.pygments.util import ClassNotFound

__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
           'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)

_formatter_cache = {}  # classes by name
_pattern_cache = {}


def _fn_matches(fn, glob):
    """Return whether the supplied file name fn matches pattern filename."""
    if glob not in _pattern_cache:
        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
        return pattern.match(fn)
    return _pattern_cache[glob].match(fn)


def _load_formatters(module_name):
    """Load a formatter (and all others in the module too)."""
    mod = __import__(module_name, None, None, ['__all__'])
    for formatter_name in mod.__all__:
        cls = getattr(mod, formatter_name)
        _formatter_cache[cls.name] = cls


def get_all_formatters():
    """Return a generator for all formatter classes."""
    # NB: this returns formatter classes, not info like get_all_lexers().
    for info in FORMATTERS.values():
        if info[1] not in _formatter_cache:
            _load_formatters(info[0])
        yield _formatter_cache[info[1]]
    for _, formatter in find_plugin_formatters():
        yield formatter


def find_formatter_class(alias):
    """Lookup a formatter by alias.

    Returns None if not found.
    """
    for module_name, name, aliases, _, _ in FORMATTERS.values():
        if alias in aliases:
            if name not in _formatter_cache:
                _load_formatters(module_name)
            return _formatter_cache[name]
    for _, cls in find_plugin_formatters():
        if alias in cls.aliases:
            return cls


def get_formatter_by_name(_alias, **options):
    """
    Return an instance of a :class:`.Formatter` subclass that has `alias` in its
    aliases list. The formatter is given the `options` at its instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that
    alias is found.
    """
    cls = find_formatter_class(_alias)
    if cls is None:
        raise ClassNotFound(f"no formatter found for name {_alias!r}")
    return cls(**options)


def load_formatter_from_file(filename, formattername="CustomFormatter", **options):
    """
    Return a `Formatter` subclass instance loaded from the provided file, relative
    to the current directory.

    The file is expected to contain a Formatter class named ``formattername``
    (by default, CustomFormatter). Users should be very careful with the input, because
    this method is equivalent to running ``eval()`` on the input file. The formatter is
    given the `options` at its instantiation.

    :exc:`pygments.util.ClassNotFound` is raised if there are any errors loading
    the formatter.

    .. versionadded:: 2.2
    """
    try:
        # This empty dict will contain the namespace for the exec'd file
        custom_namespace = {}
        with open(filename, 'rb') as f:
            exec(f.read(), custom_namespace)
        # Retrieve the class `formattername` from that namespace
        if formattername not in custom_namespace:
            raise ClassNotFound(f'no valid {formattername} class found in {filename}')
        formatter_class = custom_namespace[formattername]
        # And finally instantiate it with the options
        return formatter_class(**options)
    except OSError as err:
        raise ClassNotFound(f'cannot read {filename}: {err}')
    except ClassNotFound:
        raise
    except Exception as err:
        raise ClassNotFound(f'error when loading custom formatter: {err}')


def get_formatter_for_filename(fn, **options):
    """
    Return a :class:`.Formatter` subclass instance that has a filename pattern
    matching `fn`. The formatter is given the `options` at its instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename
    is found.
    """
    fn = basename(fn)
    for modname, name, _, filenames, _ in FORMATTERS.values():
        for filename in filenames:
            if _fn_matches(fn, filename):
                if name not in _formatter_cache:
                    _load_formatters(modname)
                return _formatter_cache[name](**options)
    for _name, cls in find_plugin_formatters():
        for filename in cls.filenames:
            if _fn_matches(fn, filename):
                return cls(**options)
    raise ClassNotFound(f"no formatter found for file name {fn!r}")


class _automodule(types.ModuleType):
    """Automatically import formatters."""

    def __getattr__(self, name):
        info = FORMATTERS.get(name)
        if info:
            _load_formatters(info[0])
            cls = _formatter_cache[info[1]]
            setattr(self, name, cls)
            return cls
        raise AttributeError(name)


oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
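
# --- Usage sketch (editor's note, not part of the vendored file) ---
# The helpers above resolve formatters lazily through FORMATTERS (see
# _mapping.py below), importing only the module that is actually needed:
#
#     from pip._vendor.pygments.formatters import (
#         get_formatter_by_name, get_formatter_for_filename)
#
#     f1 = get_formatter_by_name('html', full=True, title='demo')
#     f2 = get_formatter_for_filename('out.tex')   # a LatexFormatter instance
#
# Unknown aliases or filenames raise pygments.util.ClassNotFound rather than
# ImportError.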
@@ -0,0 +1,23 @@
# Automatically generated by scripts/gen_mapfiles.py.
# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.

FORMATTERS = {
    'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'),
    'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
    'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option). The ``<div>``'s CSS class can be set by the `cssclass` option."),
    'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
    'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
    'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
    'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
    'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
    'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
    'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
    'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
    'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
    'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
    'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
    'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
}
venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py (new file, 963 lines)
@@ -0,0 +1,963 @@
"""
    pygments.lexer
    ~~~~~~~~~~~~~~

    Base lexer classes.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
import sys
import time

from pip._vendor.pygments.filter import apply_filters, Filter
from pip._vendor.pygments.filters import get_filter_by_name
from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
    make_analysator, Future, guess_decode
from pip._vendor.pygments.regexopt import regex_opt

__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
           'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
           'default', 'words', 'line_re']

line_re = re.compile('.*?\n')

_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
                 (b'\xff\xfe\0\0', 'utf-32'),
                 (b'\0\0\xfe\xff', 'utf-32be'),
                 (b'\xff\xfe', 'utf-16'),
                 (b'\xfe\xff', 'utf-16be')]

_default_analyse = staticmethod(lambda x: 0.0)


class LexerMeta(type):
    """
    This metaclass automagically converts ``analyse_text`` methods into
    static methods which always return float values.
    """

    def __new__(mcs, name, bases, d):
        if 'analyse_text' in d:
            d['analyse_text'] = make_analysator(d['analyse_text'])
        return type.__new__(mcs, name, bases, d)


class Lexer(metaclass=LexerMeta):
    """
    Lexer for a specific language.

    See also :doc:`lexerdevelopment`, a high-level guide to writing
    lexers.

    Lexer classes have attributes used for choosing the most appropriate
    lexer based on various criteria.

    .. autoattribute:: name
       :no-value:
    .. autoattribute:: aliases
       :no-value:
    .. autoattribute:: filenames
       :no-value:
    .. autoattribute:: alias_filenames
    .. autoattribute:: mimetypes
       :no-value:
    .. autoattribute:: priority

    Lexers included in Pygments should have two additional attributes:

    .. autoattribute:: url
       :no-value:
    .. autoattribute:: version_added
       :no-value:

    Lexers included in Pygments may have additional attributes:

    .. autoattribute:: _example
       :no-value:

    You can pass options to the constructor. The basic options recognized
    by all lexers and processed by the base `Lexer` class are:

    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.

        .. versionadded:: 1.3

    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection). Can also be ``'chardet'`` to use the chardet
        library, if it is installed.
    ``inencoding``
        Overrides the ``encoding`` if given.
    """

    #: Full name of the lexer, in human-readable form
    name = None

    #: A list of short, unique identifiers that can be used to look
    #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
    aliases = []

    #: A list of `fnmatch` patterns that match filenames which contain
    #: content for this lexer. The patterns in this list should be unique among
    #: all lexers.
    filenames = []

    #: A list of `fnmatch` patterns that match filenames which may or may not
    #: contain content for this lexer. This list is used by the
    #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
    #: are then included in guessing the correct one. That means that
    #: e.g. every lexer for HTML and a template language should include
    #: ``\*.html`` in this list.
    alias_filenames = []

    #: A list of MIME types for content that can be lexed with this lexer.
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    #: URL of the language specification/definition. Used in the Pygments
    #: documentation. Set to an empty string to disable.
    url = None

    #: Version of Pygments in which the lexer was added.
    version_added = None

    #: Example file name. Relative to the ``tests/examplefiles`` directory.
    #: This is used by the documentation generator to show an example.
    _example = None

    def __init__(self, **options):
        """
        This constructor takes arbitrary options as keyword arguments.
        Every subclass must first process its own options and then call
        the `Lexer` constructor, since it processes the basic
        options like `stripnl`.

        An example looks like this:

        .. sourcecode:: python

           def __init__(self, **options):
               self.compress = options.get('compress', '')
               Lexer.__init__(self, **options)

        As these options must all be specifiable as strings (due to the
        command line usage), there are various utility functions
        available to help with that, see `Utilities`_.
        """
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'guess')
        self.encoding = options.get('inencoding') or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>'
        else:
            return f'<pygments.lexers.{self.__class__.__name__}>'

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)

    def analyse_text(text):
        """
        A static method which is called for lexer guessing.

        It should analyse the text and return a float in the range
        from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
        will not be selected as the most probable one, if it returns
        ``1.0``, it will be selected immediately. This is used by
        `guess_lexer`.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`
        it's the same as if the return value was ``0.0``.
        """

    def _preprocess_lexer_input(self, text):
        """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""

        if not isinstance(text, str):
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    # pip vendoring note: this code is not reachable by pip,
                    # removed import of chardet to make it clear.
                    raise ImportError('chardet is not vendored by pip')
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    # unreachable in pip's vendored copy: the ImportError
                    # above is always raised before `chardet` is used
                    enc = chardet.detect(text[:1024])  # Guess using first 1KB
                    decoded = text.decode(enc.get('encoding') or 'utf-8',
                                          'replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        return text

    def get_tokens(self, text, unfiltered=False):
        """
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
        """
        text = self._preprocess_lexer_input(text)

        def streamer():
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
        """
        raise NotImplementedError
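

# Usage sketch, assuming the vendored PythonLexer is importable from the
# lexers package below; get_tokens() yields the (tokentype, value) pairs
# documented above:
#
#     from pip._vendor.pygments.lexers import PythonLexer
#     for tokentype, value in PythonLexer().get_tokens("print('hi')\n"):
#         print(tokentype, repr(value))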


class DelegatingLexer(Lexer):
    """
    This lexer takes two lexers as arguments. A root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        buffered = ''
        insertions = []
        lng_buffer = []
        for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
            if t is self.needle:
                if lng_buffer:
                    insertions.append((len(buffered), lng_buffer))
                    lng_buffer = []
                buffered += v
            else:
                lng_buffer.append((i, t, v))
        if lng_buffer:
            insertions.append((len(buffered), lng_buffer))
        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(buffered))


# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#


class include(str):  # pylint: disable=invalid-name
    """
    Indicates that a state should include rules from another state.
    """
    pass


class _inherit:
    """
    Indicates that a state should inherit from its superclass.
    """
    def __repr__(self):
        return 'inherit'

inherit = _inherit()  # pylint: disable=invalid-name


class combined(tuple):  # pylint: disable=invalid-name
    """
    Indicates a state combined from multiple states.
    """

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass


class _PseudoMatch:
    """
    A pseudo match object constructed from a string.
    """

    def __init__(self, start, text):
        self._text = text
        self._start = start

    def start(self, arg=None):
        return self._start

    def end(self, arg=None):
        return self._start + len(self._text)

    def group(self, arg=None):
        if arg:
            raise IndexError('No such group')
        return self._text

    def groups(self):
        return (self._text,)

    def groupdict(self):
        return {}


def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        if ctx:
            ctx.pos = match.end()
    return callback
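

# Sketch of a hypothetical rule using bygroups(): one action per regex group,
# with None skipping a group (rule not taken from this file):
#
#     from pip._vendor.pygments.token import Keyword, Whitespace, Name
#     (r'(def)(\s+)([a-zA-Z_]\w*)', bygroups(Keyword, Whitespace, Name.Function)),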


class _This:
    """
    Special singleton used for indicating the caller class.
    Used by ``using``.
    """

this = _This()


def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
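

# Sketch of hypothetical rules using using(): delegate matched text to
# another lexer class, or back to the current one via `this` (both the
# SomeOtherLexer name and the 'interp' state are assumptions):
#
#     (r'<\?.*?\?>', using(SomeOtherLexer)),
#     (r'\{\{.*?\}\}', using(this, state='interp')),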


class default:
    """
    Indicates a state or state action (e.g. #pop) to apply.
    For example default('#pop') is equivalent to ('', Token, '#pop').
    Note that state tuples may be used as well.

    .. versionadded:: 2.0
    """
    def __init__(self, state):
        self.state = state


class words(Future):
    """
    Indicates a list of literal words that is transformed into an optimized
    regex that matches any of the words.

    .. versionadded:: 2.0
    """
    def __init__(self, words, prefix='', suffix=''):
        self.words = words
        self.prefix = prefix
        self.suffix = suffix

    def get(self):
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
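

# Sketch of a hypothetical rule using words(); the word list is compiled
# into a single optimized regex by regex_opt() when rules are processed:
#
#     from pip._vendor.pygments.token import Keyword
#     (words(('if', 'elif', 'else'), prefix=r'\b', suffix=r'\b'), Keyword),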


class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        if isinstance(regex, Future):
            regex = regex.get()
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            f'token type must be simple type or callable, not {token!r}'
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                return -int(new_state[5:])
            else:
                assert False, f'unknown new state {new_state!r}'
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, f'circular state ref {istate!r}'
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, f'unknown new state def {new_state!r}'

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition."""
        assert isinstance(state, str), f"wrong state name {state!r}"
        assert state[0] != '#', f"invalid state name {state!r}"
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, f"circular state reference {state!r}"
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # should be processed already, but may not in the case of:
                # 1. the state has no counterpart in any parent
                # 2. the state includes more than one 'inherit'
                continue
            if isinstance(tdef, default):
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                tokens.append((re.compile('').match, None, new_state))
                continue

            assert type(tdef) is tuple, f"wrong rule def {tdef!r}"

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError(f"uncompilable regex {tdef[0]!r} in state {state!r} of {cls!r}: {err}") from err

            token = cls._process_token(tdef[1])

            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    # N.b. because this is assigned by reference, sufficiently
                    # deep hierarchies are processed incrementally (e.g. for
                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                    # will not see any inherits in B).
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # N.b. this is the index in items (that is, the superclass
                    # copy), so offset required when storing below.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)


class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all times there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break
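

# Minimal sketch of a RegexLexer subclass (hypothetical, for illustration):
# two rules in the 'root' state, driven by the matching loop above.
#
#     from pip._vendor.pygments.token import Comment, Text
#
#     class CommentedTextLexer(RegexLexer):
#         name = 'CommentedText'
#         tokens = {
#             'root': [
#                 (r'#.*?$', Comment.Single),
#                 (r'[^#\n]+', Text),
#             ],
#         }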


class LexerContext:
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        self.end = end or len(text)  # end=0 not supported ;-)
        self.stack = stack or ['root']

    def __repr__(self):
        return f'LexerContext({self.text!r}, {self.pos!r}, {self.stack!r})'


class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                            # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break


def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions
        yield from tokens
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            if tmpval:
                yield realpos, t, tmpval
                realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        if oldi < len(v):
            yield realpos, t, v[oldi:]
            realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary


class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            t0 = time.time()
            res = compiled.match(text, pos, endpos)
            t1 = time.time()
            info[0] += 1
            info[1] += t1 - t0
            return res
        return match_func


class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)
venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__init__.py
@@ -0,0 +1,362 @@
"""
    pygments.lexers
    ~~~~~~~~~~~~~~~

    Pygments lexers.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
import sys
import types
import fnmatch
from os.path import basename

from pip._vendor.pygments.lexers._mapping import LEXERS
from pip._vendor.pygments.modeline import get_filetype_from_buffer
from pip._vendor.pygments.plugin import find_plugin_lexers
from pip._vendor.pygments.util import ClassNotFound, guess_decode

COMPAT = {
    'Python3Lexer': 'PythonLexer',
    'Python3TracebackLexer': 'PythonTracebackLexer',
    'LeanLexer': 'Lean3Lexer',
}

__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
           'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)

_lexer_cache = {}
_pattern_cache = {}


def _fn_matches(fn, glob):
    """Return whether the supplied file name fn matches pattern filename."""
    if glob not in _pattern_cache:
        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
        return pattern.match(fn)
    return _pattern_cache[glob].match(fn)


def _load_lexers(module_name):
    """Load a lexer (and all others in the module too)."""
    mod = __import__(module_name, None, None, ['__all__'])
    for lexer_name in mod.__all__:
        cls = getattr(mod, lexer_name)
        _lexer_cache[cls.name] = cls


def get_all_lexers(plugins=True):
    """Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers.

    If *plugins* is true (the default), plugin lexers supplied by entrypoints
    are also returned. Otherwise, only builtin ones are considered.
    """
    for item in LEXERS.values():
        yield item[1:]
    if plugins:
        for lexer in find_plugin_lexers():
            yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes


def find_lexer_class(name):
    """
    Return the `Lexer` subclass that has the *name* attribute as given by
    the *name* argument.
    """
    if name in _lexer_cache:
        return _lexer_cache[name]
    # lookup builtin lexers
    for module_name, lname, aliases, _, _ in LEXERS.values():
        if name == lname:
            _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if cls.name == name:
            return cls


def find_lexer_class_by_name(_alias):
    """
    Return the `Lexer` subclass that has `alias` in its aliases list, without
    instantiating it.

    Like `get_lexer_by_name`, but does not instantiate the class.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
    found.

    .. versionadded:: 2.2
    """
    if not _alias:
        raise ClassNotFound(f'no lexer for alias {_alias!r} found')
    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.values():
        if _alias.lower() in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias.lower() in cls.aliases:
            return cls
    raise ClassNotFound(f'no lexer for alias {_alias!r} found')


def get_lexer_by_name(_alias, **options):
    """
    Return an instance of a `Lexer` subclass that has `alias` in its
    aliases list. The lexer is given the `options` at its
    instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
    found.
    """
    if not _alias:
        raise ClassNotFound(f'no lexer for alias {_alias!r} found')

    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.values():
        if _alias.lower() in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name](**options)
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias.lower() in cls.aliases:
            return cls(**options)
    raise ClassNotFound(f'no lexer for alias {_alias!r} found')
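

# Usage sketch, assuming the 'python' alias is present in the LEXERS table
# from _mapping.py below:
#
#     lexer = get_lexer_by_name('python', stripall=True)
#     assert lexer.name == 'Python'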


def load_lexer_from_file(filename, lexername="CustomLexer", **options):
    """Load a lexer from a file.

    This method expects a file located relative to the current working
    directory, which contains a Lexer class. By default, it expects the
    Lexer to be named CustomLexer; you can specify your own class name
    as the second argument to this function.

    Users should be very careful with the input, because this method
    is equivalent to running eval on the input file.

    Raises ClassNotFound if there are any problems importing the Lexer.

    .. versionadded:: 2.2
    """
    try:
        # This empty dict will contain the namespace for the exec'd file
        custom_namespace = {}
        with open(filename, 'rb') as f:
            exec(f.read(), custom_namespace)
        # Retrieve the class `lexername` from that namespace
        if lexername not in custom_namespace:
            raise ClassNotFound(f'no valid {lexername} class found in {filename}')
        lexer_class = custom_namespace[lexername]
        # And finally instantiate it with the options
        return lexer_class(**options)
    except OSError as err:
        raise ClassNotFound(f'cannot read {filename}: {err}')
    except ClassNotFound:
        raise
    except Exception as err:
        raise ClassNotFound(f'error when loading custom lexer: {err}')


def find_lexer_class_for_filename(_fn, code=None):
    """Get a lexer for a filename.

    If multiple lexers match the filename pattern, use ``analyse_text()`` to
    figure out which one is more appropriate.

    Returns None if not found.
    """
    matches = []
    fn = basename(_fn)
    for modname, name, _, filenames, _ in LEXERS.values():
        for filename in filenames:
            if _fn_matches(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if _fn_matches(fn, filename):
                matches.append((cls, filename))

    if isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = guess_decode(code)

    def get_rating(info):
        cls, filename = info
        # explicit patterns get a bonus
        bonus = '*' not in filename and 0.5 or 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class. The default implementation returns None which
        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus, cls.__name__
        return cls.priority + bonus, cls.__name__

    if matches:
        matches.sort(key=get_rating)
        # print "Possible lexers, after sort:", matches
        return matches[-1][0]


def get_lexer_for_filename(_fn, code=None, **options):
    """Get a lexer for a filename.

    Return a `Lexer` subclass instance that has a filename pattern
    matching `fn`. The lexer is given the `options` at its
    instantiation.

    Raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
    is found.

    If multiple lexers match the filename pattern, use their ``analyse_text()``
    methods to figure out which one is more appropriate.
    """
    res = find_lexer_class_for_filename(_fn, code)
    if not res:
        raise ClassNotFound(f'no lexer for filename {_fn!r} found')
    return res(**options)


def get_lexer_for_mimetype(_mime, **options):
    """
    Return a `Lexer` subclass instance that has `mime` in its mimetype
    list. The lexer is given the `options` at its instantiation.

    Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype
    is found.
    """
    for modname, name, _, _, mimetypes in LEXERS.values():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound(f'no lexer for mimetype {_mime!r} found')


def _iter_lexerclasses(plugins=True):
    """Return an iterator over all lexer classes."""
    for key in sorted(LEXERS):
        module_name, name = LEXERS[key][:2]
        if name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[name]
    if plugins:
        yield from find_plugin_lexers()


def guess_lexer_for_filename(_fn, _text, **options):
    """
    As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
    or `alias_filenames` that matches `filename` are taken into consideration.

    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
    handle the content.
    """
    fn = basename(_fn)
    primary = {}
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = True
        for filename in lexer.alias_filenames:
            if _fn_matches(fn, filename):
                matching_lexers.add(lexer)
                primary[lexer] = False
    if not matching_lexers:
        raise ClassNotFound(f'no lexer for filename {fn!r} found')
    if len(matching_lexers) == 1:
        return matching_lexers.pop()(**options)
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        result.append((rv, lexer))

    def type_sort(t):
        # sort by:
        # - analyse score
        # - is primary filename pattern?
        # - priority
        # - last resort: class name
        return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
    result.sort(key=type_sort)

    return result[-1][1](**options)


def guess_lexer(_text, **options):
    """
    Return a `Lexer` subclass instance that's guessed from the text in
    `text`. For that, the :meth:`.analyse_text()` method of every known lexer
    class is called with the text as argument, and the lexer which returned the
    highest value will be instantiated and returned.

    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
    handle the content.
    """

    if not isinstance(_text, str):
        inencoding = options.get('inencoding', options.get('encoding'))
        if inencoding:
            _text = _text.decode(inencoding or 'utf8')
        else:
            _text, _ = guess_decode(_text)

    # try to get a vim modeline first
    ft = get_filetype_from_buffer(_text)

    if ft is not None:
        try:
            return get_lexer_by_name(ft, **options)
        except ClassNotFound:
            pass

    best_lexer = [0.0, None]
    for lexer in _iter_lexerclasses():
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        if rv > best_lexer[0]:
            best_lexer[:] = (rv, lexer)
    if not best_lexer[0] or best_lexer[1] is None:
        raise ClassNotFound('no lexer matching the text found')
    return best_lexer[1](**options)
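

# Usage sketch; the returned class depends on each lexer's analyse_text()
# score (a shebang line is a strong signal):
#
#     lexer = guess_lexer('#!/usr/bin/env python\nprint(42)\n')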


class _automodule(types.ModuleType):
    """Automatically import lexers."""

    def __getattr__(self, name):
        info = LEXERS.get(name)
        if info:
            _load_lexers(info[0])
            cls = _lexer_cache[info[1]]
            setattr(self, name, cls)
            return cls
        if name in COMPAT:
            return getattr(self, COMPAT[name])
        raise AttributeError(name)


oldmod = sys.modules[__name__]
newmod = _automodule(__name__)
newmod.__dict__.update(oldmod.__dict__)
sys.modules[__name__] = newmod
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
Binary file not shown.
Binary file not shown.
Binary file not shown.
venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/_mapping.py
@@ -0,0 +1,602 @@
# Automatically generated by scripts/gen_mapfiles.py.
# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.

LEXERS = {
'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()),
'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)),
'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
'ArturoLexer': ('pip._vendor.pygments.lexers.arturo', 'Arturo', ('arturo', 'art'), ('*.art',), ()),
'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature', 'application/pem-certificate-chain')),
'Asn1Lexer': ('pip._vendor.pygments.lexers.asn1', 'ASN.1', ('asn1',), ('*.asn1',), ()),
'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),
'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
'BQNLexer': ('pip._vendor.pygments.lexers.bqn', 'BQN', ('bqn',), ('*.bqn',), ()),
'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell', 'openrc'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
'BlueprintLexer': ('pip._vendor.pygments.lexers.blueprint', 'Blueprint', ('blueprint',), ('*.blp',), ('text/x-blueprint',)),
'BnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')),
'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
'CarbonLexer': ('pip._vendor.pygments.lexers.carbon', 'Carbon', ('carbon',), ('*.carbon',), ('text/x-carbon',)),
'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
'CodeQLLexer': ('pip._vendor.pygments.lexers.codeql', 'CodeQL', ('codeql', 'ql'), ('*.ql', '*.qll'), ()),
'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()),
'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
'DaxLexer': ('pip._vendor.pygments.lexers.dax', 'Dax', ('dax',), ('*.dax',), ()),
'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()),
'DebianSourcesLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sources file', ('debian.sources',), ('*.sources',), ()),
'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
'DesktopLexer': ('pip._vendor.pygments.lexers.configs', 'Desktop file', ('desktop',), ('*.desktop',), ('application/x-desktop',)),
'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
'DnsZoneLexer': ('pip._vendor.pygments.lexers.dns', 'Zone', ('zone',), ('*.zone',), ('text/dns',)),
'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)),
'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)),
'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), (), ('text/html+evoque',)),
'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), (), ('application/xml+evoque',)),
'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi', '*.fsx'), ('text/x-fsharp',)),
'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
'FiftLexer': ('pip._vendor.pygments.lexers.fift', 'Fift', ('fift', 'fif'), ('*.fif',), ()),
'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
'FortranFixedLexer': ('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
'FuncLexer': ('pip._vendor.pygments.lexers.func', 'FunC', ('func', 'fc'), ('*.fc', '*.func'), ()),
'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
'GAPConsoleLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP session', ('gap-console', 'gap-repl'), ('*.tst',), ()),
'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()),
'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()),
'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)),
'GleamLexer': ('pip._vendor.pygments.lexers.gleam', 'Gleam', ('gleam',), ('*.gleam',), ('text/x-gleam',)),
'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)),
'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
'GoogleSqlLexer': ('pip._vendor.pygments.lexers.sql', 'GoogleSQL', ('googlesql', 'zetasql'), ('*.googlesql', '*.googlesql.sql'), ('text/x-google-sql', 'text/x-google-sql-aux')),
'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
'GraphQLLexer': ('pip._vendor.pygments.lexers.graphql', 'GraphQL', ('graphql',), ('*.graphql',), ()),
'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
'HareLexer': ('pip._vendor.pygments.lexers.hare', 'Hare', ('hare',), ('*.ha',), ('text/x-hare',)),
'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), ('*Spec.hs',), ()),
'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang', 'hy'), ('*.hy',), ('text/x-hy', 'application/x-hy')),
'HybrisLexer': ('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris',), ('*.hyb',), ('text/x-hybris', 'application/x-hybris')),
'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig'), ('text/x-ini', 'text/inf')),
'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()),
'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)),
'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
'JanetLexer': ('pip._vendor.pygments.lexers.lisp', 'Janet', ('janet',), ('*.janet', '*.jdn'), ('text/x-janet', 'application/x-janet')),
'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
'Json5Lexer': ('pip._vendor.pygments.lexers.json5', 'JSON5', ('json5',), ('*.json5',), ()),
'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()),
'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', '*.jsonl', '*.ndjson', 'Pipfile.lock'), ('application/json', 'application/json-object', 'application/x-ndjson', 'application/jsonl', 'application/json-seq')),
'JsonnetLexer': ('pip._vendor.pygments.lexers.jsonnet', 'Jsonnet', ('jsonnet',), ('*.jsonnet', '*.libsonnet'), ()),
'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
'JsxLexer': ('pip._vendor.pygments.lexers.jsx', 'JSX', ('jsx', 'react'), ('*.jsx', '*.react'), ('text/jsx', 'text/typescript-jsx')),
'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()),
'KustoLexer': ('pip._vendor.pygments.lexers.kusto', 'Kusto', ('kql', 'kusto'), ('*.kql', '*.kusto', '.csl'), ()),
'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
'LdaprcLexer': ('pip._vendor.pygments.lexers.ldap', 'LDAP configuration file', ('ldapconf', 'ldaprc'), ('.ldaprc', 'ldaprc', 'ldap.conf'), ('text/x-ldapconf',)),
'LdifLexer': ('pip._vendor.pygments.lexers.ldap', 'LDIF', ('ldif',), ('*.ldif',), ('text/x-ldif',)),
'Lean3Lexer': ('pip._vendor.pygments.lexers.lean', 'Lean', ('lean', 'lean3'), ('*.lean',), ('text/x-lean', 'text/x-lean3')),
'Lean4Lexer': ('pip._vendor.pygments.lexers.lean', 'Lean4', ('lean4',), ('*.lean',), ('text/x-lean4',)),
'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)),
'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()),
'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)),
'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)),
'LiterateHaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)),
'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)),
'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)),
'LlvmLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
'LuauLexer': ('pip._vendor.pygments.lexers.scripting', 'Luau', ('luau',), ('*.luau',), ()),
'MCFunctionLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
'MCSchemaLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCSchema', ('mcschema',), ('*.mcschema',), ('text/mcschema',)),
'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
'MIPSLexer': ('pip._vendor.pygments.lexers.mips', 'MIPS', ('mips',), ('*.mips', '*.MIPS'), ()),
'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
'MapleLexer': ('pip._vendor.pygments.lexers.maple', 'Maple', ('maple',), ('*.mpl', '*.mi', '*.mm'), ('text/x-maple',)),
'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)),
'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()),
'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)),
'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MojoLexer': ('pip._vendor.pygments.lexers.mojo', 'Mojo', ('mojo', '🔥'), ('*.mojo', '*.🔥'), ('text/x-mojo', 'application/x-mojo')),
'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM', '*.nasm'), ('text/x-nasm',)),
'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()),
'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
'NitLexer': ('pip._vendor.pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)),
'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
'NumbaIRLexer': ('pip._vendor.pygments.lexers.numbair', 'Numba_IR', ('numba_ir', 'numbair'), ('*.numba_ir',), ('text/x-numba_ir', 'text/x-numbair')),
'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()),
'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
'OpenScadLexer': ('pip._vendor.pygments.lexers.openscad', 'OpenSCAD', ('openscad',), ('*.scad',), ('application/x-openscad',)),
'OrgLexer': ('pip._vendor.pygments.lexers.markup', 'Org Mode', ('org', 'orgmode', 'org-mode'), ('*.org',), ('text/org',)),
'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()),
'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
'PanLexer': ('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
'PddlLexer': ('pip._vendor.pygments.lexers.pddl', 'PDDL', ('pddl',), ('*.pddl',), ()),
'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
'PhixLexer': ('pip._vendor.pygments.lexers.phix', 'Phix', ('phix',), ('*.exw',), ('text/x-phix',)),
'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
'PortugolLexer': ('pip._vendor.pygments.lexers.pascal', 'Portugol', ('portugol',), ('*.alg', '*.portugol'), ()),
'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
'PostgresExplainLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL EXPLAIN dialect', ('postgres-explain',), ('*.explain',), ('text/x-postgresql-explain',)),
'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()),
'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()),
'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
'PromelaLexer': ('pip._vendor.pygments.lexers.c_like', 'Promela', ('promela',), ('*.pml', '*.prom', '*.prm', '*.promela', '*.pr', '*.pm'), ('text/x-promela',)),
'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
'PrqlLexer': ('pip._vendor.pygments.lexers.prql', 'PRQL', ('prql',), ('*.prql',), ('application/prql', 'application/x-prql')),
'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
'PtxLexer': ('pip._vendor.pygments.lexers.ptx', 'PTX', ('ptx',), ('*.ptx',), ('text/x-ptx',)),
'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
'PyPyLogLexer': ('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon', 'python-console'), (), ('text/x-python-doctest',)),
'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3', 'bazel', 'starlark', 'pyi'), ('*.py', '*.pyw', '*.pyi', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
'ReasonLexer': ('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)),
'RebolLexer': ('pip._vendor.pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
'RegoLexer': ('pip._vendor.pygments.lexers.rego', 'Rego', ('rego',), ('*.rego',), ('text/x-rego',)),
'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()),
'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()),
'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')),
'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
'SNBTLexer': ('pip._vendor.pygments.lexers.minecraft', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),
'ScalaLexer': ('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)),
'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()),
'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
'SoongLexer': ('pip._vendor.pygments.lexers.soong', 'Soong', ('androidbp', 'bp', 'soong'), ('Android.bp',), ()),
'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()),
'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()),
'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)),
'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()),
'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()),
'SspLexer': ('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
'SystemdLexer': ('pip._vendor.pygments.lexers.configs', 'Systemd', ('systemd',), ('*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ()),
'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ('application/toml',)),
'TableGenLexer': ('pip._vendor.pygments.lexers.tablegen', 'TableGen', ('tablegen', 'td'), ('*.td',), ()),
'TactLexer': ('pip._vendor.pygments.lexers.tact', 'Tact', ('tact',), ('*.tact',), ()),
'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)),
'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()),
'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)),
|
||||
'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
|
||||
'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
|
||||
'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf', 'hcl'), ('*.tf', '*.hcl'), ('application/x-tf', 'application/x-terraform')),
|
||||
'TexLexer': ('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
|
||||
'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
|
||||
'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
|
||||
'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
|
||||
'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
|
||||
'TlbLexer': ('pip._vendor.pygments.lexers.tlb', 'Tl-b', ('tlb',), ('*.tlb',), ()),
|
||||
'TlsLexer': ('pip._vendor.pygments.lexers.tls', 'TLS Presentation Language', ('tls',), (), ()),
|
||||
'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
|
||||
'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
|
||||
'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
|
||||
'TsxLexer': ('pip._vendor.pygments.lexers.jsx', 'TSX', ('tsx',), ('*.tsx',), ('text/typescript-tsx',)),
|
||||
'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
|
||||
'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
|
||||
'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
|
||||
'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')),
|
||||
'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
|
||||
'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
|
||||
'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
|
||||
'TypstLexer': ('pip._vendor.pygments.lexers.typst', 'Typst', ('typst',), ('*.typ',), ('text/x-typst',)),
|
||||
'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()),
|
||||
'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
|
||||
'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
|
||||
'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()),
|
||||
'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
|
||||
'UrlEncodedLexer': ('pip._vendor.pygments.lexers.html', 'urlencoded', ('urlencoded',), (), ('application/x-www-form-urlencoded',)),
|
||||
'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
|
||||
'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
|
||||
'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
|
||||
'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
|
||||
'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
|
||||
'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
|
||||
'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
|
||||
'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
|
||||
'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas', 'visual-basic', 'visualbasic'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
|
||||
'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
|
||||
'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
|
||||
'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
|
||||
'VerifpalLexer': ('pip._vendor.pygments.lexers.verifpal', 'Verifpal', ('verifpal',), ('*.vp',), ('text/x-verifpal',)),
|
||||
'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
|
||||
'VhdlLexer': ('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
|
||||
'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
|
||||
'VisualPrologGrammarLexer': ('pip._vendor.pygments.lexers.vip', 'Visual Prolog Grammar', ('visualprologgrammar',), ('*.vipgrm',), ()),
|
||||
'VisualPrologLexer': ('pip._vendor.pygments.lexers.vip', 'Visual Prolog', ('visualprolog',), ('*.pro', '*.cl', '*.i', '*.pack', '*.ph'), ()),
|
||||
'VueLexer': ('pip._vendor.pygments.lexers.html', 'Vue', ('vue',), ('*.vue',), ()),
|
||||
'VyperLexer': ('pip._vendor.pygments.lexers.vyper', 'Vyper', ('vyper',), ('*.vy',), ()),
|
||||
'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
|
||||
'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
|
||||
'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
|
||||
'WgslLexer': ('pip._vendor.pygments.lexers.wgsl', 'WebGPU Shading Language', ('wgsl',), ('*.wgsl',), ('text/wgsl',)),
|
||||
'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
|
||||
'WikitextLexer': ('pip._vendor.pygments.lexers.markup', 'Wikitext', ('wikitext', 'mediawiki'), (), ('text/x-wiki',)),
|
||||
'WoWTocLexer': ('pip._vendor.pygments.lexers.wowtoc', 'World of Warcraft TOC', ('wowtoc',), ('*.toc',), ()),
|
||||
'WrenLexer': ('pip._vendor.pygments.lexers.wren', 'Wren', ('wren',), ('*.wren',), ()),
|
||||
'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
|
||||
'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
|
||||
'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
|
||||
'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')),
|
||||
'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
|
||||
'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
|
||||
'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
|
||||
'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
|
||||
'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
|
||||
'XppLexer': ('pip._vendor.pygments.lexers.dotnet', 'X++', ('xpp', 'x++'), ('*.xpp',), ()),
|
||||
'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
|
||||
'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
|
||||
'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
|
||||
'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')),
|
||||
'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
|
||||
'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
|
||||
'YaraLexer': ('pip._vendor.pygments.lexers.yara', 'YARA', ('yara', 'yar'), ('*.yar',), ('text/x-yara',)),
|
||||
'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
|
||||
'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
|
||||
'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
|
||||
'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()),
|
||||
}
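
# Each LEXERS value is a tuple of (module, display name, alias names,
# filename patterns, MIME types). A hedged, doctest-style lookup sketch
# (not part of the generated file):
#
#     >>> LEXERS['SqlLexer'][2]
#     ('sql',)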
File diff suppressed because it is too large
@@ -0,0 +1,43 @@
"""
    pygments.modeline
    ~~~~~~~~~~~~~~~~~

    A simple modeline parser (based on pymodeline).

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

__all__ = ['get_filetype_from_buffer']


modeline_re = re.compile(r'''
    (?: vi | vim | ex ) (?: [<=>]? \d* )? :
    .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)


def get_filetype_from_line(l):  # noqa: E741
    m = modeline_re.search(l)
    if m:
        return m.group(1)


def get_filetype_from_buffer(buf, max_lines=5):
    """
    Scan the buffer for modelines and return filetype if one is found.
    """
    lines = buf.splitlines()
    for line in lines[-1:-max_lines-1:-1]:
        ret = get_filetype_from_line(line)
        if ret:
            return ret
    for i in range(max_lines, -1, -1):
        if i < len(lines):
            ret = get_filetype_from_line(lines[i])
            if ret:
                return ret

    return None
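
# Usage sketch (hedged, doctest-style; the sample buffer is illustrative):
#
#     >>> get_filetype_from_buffer('# some script\n# vim: set ft=python:\n')
#     'python'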
@@ -0,0 +1,72 @@
"""
    pygments.plugin
    ~~~~~~~~~~~~~~~

    Pygments plugin interface.

    lexer plugins::

        [pygments.lexers]
        yourlexer = yourmodule:YourLexer

    formatter plugins::

        [pygments.formatters]
        yourformatter = yourformatter:YourFormatter
        /.ext = yourformatter:YourFormatter

    As you can see, you can define extensions for the formatter
    with a leading slash.

    style plugins::

        [pygments.styles]
        yourstyle = yourstyle:YourStyle

    filter plugins::

        [pygments.filters]
        yourfilter = yourfilter:YourFilter


    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
from importlib.metadata import entry_points

LEXER_ENTRY_POINT = 'pygments.lexers'
FORMATTER_ENTRY_POINT = 'pygments.formatters'
STYLE_ENTRY_POINT = 'pygments.styles'
FILTER_ENTRY_POINT = 'pygments.filters'


def iter_entry_points(group_name):
    groups = entry_points()
    if hasattr(groups, 'select'):
        # New interface in Python 3.10 and newer versions of the
        # importlib_metadata backport.
        return groups.select(group=group_name)
    else:
        # Older interface, deprecated in Python 3.10 and recent
        # importlib_metadata, but we need it in Python 3.8 and 3.9.
        return groups.get(group_name, [])


def find_plugin_lexers():
    for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
        yield entrypoint.load()


def find_plugin_formatters():
    for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()


def find_plugin_styles():
    for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()


def find_plugin_filters():
    for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
        yield entrypoint.name, entrypoint.load()
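
# A hedged registration sketch: a third-party distribution (hypothetical
# name ``mypackage``) would expose a lexer via entry points, e.g. in
# pyproject.toml:
#
#     [project.entry-points."pygments.lexers"]
#     mylexer = "mypackage.lexers:MyLexer"
#
# after which find_plugin_lexers() above will yield MyLexer.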
@@ -0,0 +1,91 @@
"""
    pygments.regexopt
    ~~~~~~~~~~~~~~~~~

    An algorithm that generates optimized regexes for matching long lists of
    literal strings.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
from re import escape
from os.path import commonprefix
from itertools import groupby
from operator import itemgetter

CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
FIRST_ELEMENT = itemgetter(0)


def make_charset(letters):
    return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']'


def regex_opt_inner(strings, open_paren):
    """Return a regex that matches any string in the sorted list of strings."""
    close_paren = open_paren and ')' or ''
    # print strings, repr(open_paren)
    if not strings:
        # print '-> nothing left'
        return ''
    first = strings[0]
    if len(strings) == 1:
        # print '-> only 1 string'
        return open_paren + escape(first) + close_paren
    if not first:
        # print '-> first string empty'
        return open_paren + regex_opt_inner(strings[1:], '(?:') \
            + '?' + close_paren
    if len(first) == 1:
        # multiple one-char strings? make a charset
        oneletter = []
        rest = []
        for s in strings:
            if len(s) == 1:
                oneletter.append(s)
            else:
                rest.append(s)
        if len(oneletter) > 1:  # do we have more than one oneletter string?
            if rest:
                # print '-> 1-character + rest'
                return open_paren + regex_opt_inner(rest, '') + '|' \
                    + make_charset(oneletter) + close_paren
            # print '-> only 1-character'
            return open_paren + make_charset(oneletter) + close_paren
    prefix = commonprefix(strings)
    if prefix:
        plen = len(prefix)
        # we have a prefix for all strings
        # print '-> prefix:', prefix
        return open_paren + escape(prefix) \
            + regex_opt_inner([s[plen:] for s in strings], '(?:') \
            + close_paren
    # is there a suffix?
    strings_rev = [s[::-1] for s in strings]
    suffix = commonprefix(strings_rev)
    if suffix:
        slen = len(suffix)
        # print '-> suffix:', suffix[::-1]
        return open_paren \
            + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
            + escape(suffix[::-1]) + close_paren
    # recurse on common 1-string prefixes
    # print '-> last resort'
    return open_paren + \
        '|'.join(regex_opt_inner(list(group[1]), '')
                 for group in groupby(strings, lambda s: s[0] == first[0])) \
        + close_paren


def regex_opt(strings, prefix='', suffix=''):
    """Return a regex (as a string) that matches any string in the given list.

    The strings to match must be literal strings, not regexes. They will be
    regex-escaped.

    *prefix* and *suffix* are pre- and appended to the final regex.
    """
    strings = sorted(strings)
    return prefix + regex_opt_inner(strings, '(') + suffix
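
# Worked example (computed by hand from the algorithm above; shown as a
# doctest-style sketch rather than executed here):
#
#     >>> regex_opt(['if', 'elif', 'else'])
#     '(el(?:if|se)|if)'
#
# The common 'el' prefix of 'elif'/'else' is factored out before the
# remaining alternatives are joined.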
@@ -0,0 +1,104 @@
"""
    pygments.scanner
    ~~~~~~~~~~~~~~~~

    This library implements a regex based scanner. Some languages
    like Pascal are easy to parse but have some keywords that
    depend on the context. Because of this it's impossible to lex
    that just by using a regular expression lexer like the
    `RegexLexer`.

    Have a look at the `DelphiLexer` to get an idea of how to use
    this scanner.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import re


class EndOfText(RuntimeError):
    """
    Raise if end of text is reached and the user
    tried to call a match function.
    """


class Scanner:
    """
    Simple scanner

    All method patterns are regular expression strings (not
    compiled expressions!)
    """

    def __init__(self, text, flags=0):
        """
        :param text: The text which should be scanned
        :param flags: default regular expression flags
        """
        self.data = text
        self.data_length = len(text)
        self.start_pos = 0
        self.pos = 0
        self.flags = flags
        self.last = None
        self.match = None
        self._re_cache = {}

    def eos(self):
        """`True` if the scanner reached the end of text."""
        return self.pos >= self.data_length
    eos = property(eos, doc=eos.__doc__)

    def check(self, pattern):
        """
        Apply `pattern` on the current position and return
        the match object. (Doesn't touch pos). Use this for
        lookahead.
        """
        if self.eos:
            raise EndOfText()
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        return self._re_cache[pattern].match(self.data, self.pos)

    def test(self, pattern):
        """Apply a pattern on the current position and check
        if it matches. Doesn't touch pos.
        """
        return self.check(pattern) is not None

    def scan(self, pattern):
        """
        Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
        indicates if the pattern matched. The matched value is
        stored on the instance as ``match``, the last value is
        stored as ``last``. ``start_pos`` is the position of the
        pointer before the pattern was matched, ``pos`` is the
        end position.
        """
        if self.eos:
            raise EndOfText()
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        self.last = self.match
        m = self._re_cache[pattern].match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos = m.start()
        self.pos = m.end()
        self.match = m.group()
        return True

    def get_char(self):
        """Scan exactly one char."""
        self.scan('.')

    def __repr__(self):
        return '<%s %d/%d>' % (
            self.__class__.__name__,
            self.pos,
            self.data_length
        )
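
# A minimal usage sketch (assumed input, not part of the vendored file):
#
#     >>> s = Scanner('program Test;')
#     >>> s.scan(r'\w+')      # consumes 'program'; s.match == 'program'
#     True
#     >>> s.scan(r'\s+')      # consumes the blank, pos advances
#     True
#     >>> s.test(r'Test')     # lookahead only, pos unchanged
#     True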
@@ -0,0 +1,247 @@
"""
    pygments.sphinxext
    ~~~~~~~~~~~~~~~~~~

    Sphinx extension to generate automatic documentation of lexers,
    formatters and filters.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import sys

from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import Directive
from sphinx.util.nodes import nested_parse_with_titles


MODULEDOC = '''
.. module:: %s

%s
%s
'''

LEXERDOC = '''
.. class:: %s

    :Short names: %s
    :Filenames: %s
    :MIME types: %s

    %s

    %s

'''

FMTERDOC = '''
.. class:: %s

    :Short names: %s
    :Filenames: %s

    %s

'''

FILTERDOC = '''
.. class:: %s

    :Name: %s

    %s

'''


class PygmentsDoc(Directive):
    """
    A directive to collect all lexers/formatters/filters and generate
    autoclass directives for them.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {}

    def run(self):
        self.filenames = set()
        if self.arguments[0] == 'lexers':
            out = self.document_lexers()
        elif self.arguments[0] == 'formatters':
            out = self.document_formatters()
        elif self.arguments[0] == 'filters':
            out = self.document_filters()
        elif self.arguments[0] == 'lexers_overview':
            out = self.document_lexers_overview()
        else:
            raise Exception('invalid argument for "pygmentsdoc" directive')
        node = nodes.compound()
        vl = ViewList(out.split('\n'), source='')
        nested_parse_with_titles(self.state, vl, node)
        for fn in self.filenames:
            self.state.document.settings.record_dependencies.add(fn)
        return node.children

    def document_lexers_overview(self):
        """Generate a tabular overview of all lexers.

        The columns are the lexer name, the extensions handled by this lexer
        (or "None"), the aliases and a link to the lexer class."""
        from pip._vendor.pygments.lexers._mapping import LEXERS
        from pip._vendor.pygments.lexers import find_lexer_class
        out = []

        table = []

        def format_link(name, url):
            if url:
                return f'`{name} <{url}>`_'
            return name

        for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
            lexer_cls = find_lexer_class(data[1])
            extensions = lexer_cls.filenames + lexer_cls.alias_filenames

            table.append({
                'name': format_link(data[1], lexer_cls.url),
                'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\_') or 'None',
                'aliases': ', '.join(data[2]),
                'class': f'{data[0]}.{classname}'
            })

        column_names = ['name', 'extensions', 'aliases', 'class']
        column_lengths = [max([len(row[column]) for row in table if row[column]])
                          for column in column_names]

        def write_row(*columns):
            """Format a table row"""
            out = []
            for length, col in zip(column_lengths, columns):
                if col:
                    out.append(col.ljust(length))
                else:
                    out.append(' '*length)

            return ' '.join(out)

        def write_separator():
            """Write a table separator row"""
            sep = ['='*c for c in column_lengths]
            return write_row(*sep)

        out.append(write_separator())
        out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
        out.append(write_separator())
        for row in table:
            out.append(write_row(
                row['name'],
                row['extensions'],
                row['aliases'],
                f':class:`~{row["class"]}`'))
        out.append(write_separator())

        return '\n'.join(out)

    def document_lexers(self):
        from pip._vendor.pygments.lexers._mapping import LEXERS
        from pip._vendor import pygments
        import inspect
        import pathlib

        out = []
        modules = {}
        moduledocstrings = {}
        for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
            module = data[0]
            mod = __import__(module, None, None, [classname])
            self.filenames.add(mod.__file__)
            cls = getattr(mod, classname)
            if not cls.__doc__:
                print(f"Warning: {classname} does not have a docstring.")
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')

            example_file = getattr(cls, '_example', None)
            if example_file:
                p = pathlib.Path(inspect.getabsfile(pygments)).parent.parent /\
                    'tests' / 'examplefiles' / example_file
                content = p.read_text(encoding='utf-8')
                if not content:
                    raise Exception(
                        f"Empty example file '{example_file}' for lexer "
                        f"{classname}")

                if data[2]:
                    lexer_name = data[2][0]
                    docstring += '\n\n    .. admonition:: Example\n'
                    docstring += f'\n      .. code-block:: {lexer_name}\n\n'
                    for line in content.splitlines():
                        docstring += f'          {line}\n'

            if cls.version_added:
                version_line = f'.. versionadded:: {cls.version_added}'
            else:
                version_line = ''

            modules.setdefault(module, []).append((
                classname,
                ', '.join(data[2]) or 'None',
                ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
                ', '.join(data[4]) or 'None',
                docstring,
                version_line))
            if module not in moduledocstrings:
                moddoc = mod.__doc__
                if isinstance(moddoc, bytes):
                    moddoc = moddoc.decode('utf8')
                moduledocstrings[module] = moddoc

        for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
            if moduledocstrings[module] is None:
                raise Exception(f"Missing docstring for {module}")
            heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
            out.append(MODULEDOC % (module, heading, '-'*len(heading)))
            for data in lexers:
                out.append(LEXERDOC % data)

        return ''.join(out)

    def document_formatters(self):
        from pip._vendor.pygments.formatters import FORMATTERS

        out = []
        for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
            module = data[0]
            mod = __import__(module, None, None, [classname])
            self.filenames.add(mod.__file__)
            cls = getattr(mod, classname)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            heading = cls.__name__
            out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
                                   ', '.join(data[3]).replace('*', '\\*') or 'None',
                                   docstring))
        return ''.join(out)

    def document_filters(self):
        from pip._vendor.pygments.filters import FILTERS

        out = []
        for name, cls in FILTERS.items():
            self.filenames.add(sys.modules[cls.__module__].__file__)
            docstring = cls.__doc__
            if isinstance(docstring, bytes):
                docstring = docstring.decode('utf8')
            out.append(FILTERDOC % (cls.__name__, name, docstring))
        return ''.join(out)


def setup(app):
    app.add_directive('pygmentsdoc', PygmentsDoc)
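
# Usage sketch (hedged): after adding this module to the ``extensions``
# list in a Sphinx project's conf.py, a document can write, e.g.:
#
#     .. pygmentsdoc:: lexers
#
# which expands into autoclass-style documentation for every known lexer.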
venv/lib/python3.11/site-packages/pip/_vendor/pygments/style.py
@@ -0,0 +1,203 @@
"""
    pygments.style
    ~~~~~~~~~~~~~~

    Basic style object.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pip._vendor.pygments.token import Token, STANDARD_TYPES

# Default mapping of ansixxx to RGB colors.
_ansimap = {
    # dark
    'ansiblack': '000000',
    'ansired': '7f0000',
    'ansigreen': '007f00',
    'ansiyellow': '7f7fe0',
    'ansiblue': '00007f',
    'ansimagenta': '7f007f',
    'ansicyan': '007f7f',
    'ansigray': 'e5e5e5',
    # normal
    'ansibrightblack': '555555',
    'ansibrightred': 'ff0000',
    'ansibrightgreen': '00ff00',
    'ansibrightyellow': 'ffff00',
    'ansibrightblue': '0000ff',
    'ansibrightmagenta': 'ff00ff',
    'ansibrightcyan': '00ffff',
    'ansiwhite': 'ffffff',
}
# mapping of deprecated #ansixxx colors to new color names
_deprecated_ansicolors = {
    # dark
    '#ansiblack': 'ansiblack',
    '#ansidarkred': 'ansired',
    '#ansidarkgreen': 'ansigreen',
    '#ansibrown': 'ansiyellow',
    '#ansidarkblue': 'ansiblue',
    '#ansipurple': 'ansimagenta',
    '#ansiteal': 'ansicyan',
    '#ansilightgray': 'ansigray',
    # normal
    '#ansidarkgray': 'ansibrightblack',
    '#ansired': 'ansibrightred',
    '#ansigreen': 'ansibrightgreen',
    '#ansiyellow': 'ansibrightyellow',
    '#ansiblue': 'ansibrightblue',
    '#ansifuchsia': 'ansibrightmagenta',
    '#ansiturquoise': 'ansibrightcyan',
    '#ansiwhite': 'ansiwhite',
}
ansicolors = set(_ansimap)


class StyleMeta(type):

    def __new__(mcs, name, bases, dct):
        obj = type.__new__(mcs, name, bases, dct)
        for token in STANDARD_TYPES:
            if token not in obj.styles:
                obj.styles[token] = ''

        def colorformat(text):
            if text in ansicolors:
                return text
            if text[0:1] == '#':
                col = text[1:]
                if len(col) == 6:
                    return col
                elif len(col) == 3:
                    return col[0] * 2 + col[1] * 2 + col[2] * 2
            elif text == '':
                return ''
            elif text.startswith('var') or text.startswith('calc'):
                return text
            assert False, f"wrong color format {text!r}"

        _styles = obj._styles = {}

        for ttype in obj.styles:
            for token in ttype.split():
                if token in _styles:
                    continue
                ndef = _styles.get(token.parent, None)
                styledefs = obj.styles.get(token, '').split()
                if not ndef or token is None:
                    ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
                elif 'noinherit' in styledefs and token is not Token:
                    ndef = _styles[Token][:]
                else:
                    ndef = ndef[:]
                _styles[token] = ndef
                for styledef in obj.styles.get(token, '').split():
                    if styledef == 'noinherit':
                        pass
                    elif styledef == 'bold':
                        ndef[1] = 1
                    elif styledef == 'nobold':
                        ndef[1] = 0
                    elif styledef == 'italic':
                        ndef[2] = 1
                    elif styledef == 'noitalic':
                        ndef[2] = 0
                    elif styledef == 'underline':
                        ndef[3] = 1
                    elif styledef == 'nounderline':
                        ndef[3] = 0
                    elif styledef[:3] == 'bg:':
                        ndef[4] = colorformat(styledef[3:])
                    elif styledef[:7] == 'border:':
                        ndef[5] = colorformat(styledef[7:])
                    elif styledef == 'roman':
                        ndef[6] = 1
                    elif styledef == 'sans':
                        ndef[7] = 1
                    elif styledef == 'mono':
                        ndef[8] = 1
                    else:
                        ndef[0] = colorformat(styledef)

        return obj

    def style_for_token(cls, token):
        t = cls._styles[token]
        ansicolor = bgansicolor = None
        color = t[0]
        if color in _deprecated_ansicolors:
            color = _deprecated_ansicolors[color]
        if color in ansicolors:
            ansicolor = color
            color = _ansimap[color]
        bgcolor = t[4]
        if bgcolor in _deprecated_ansicolors:
            bgcolor = _deprecated_ansicolors[bgcolor]
        if bgcolor in ansicolors:
            bgansicolor = bgcolor
            bgcolor = _ansimap[bgcolor]

        return {
            'color': color or None,
            'bold': bool(t[1]),
            'italic': bool(t[2]),
            'underline': bool(t[3]),
            'bgcolor': bgcolor or None,
            'border': t[5] or None,
            'roman': bool(t[6]) or None,
            'sans': bool(t[7]) or None,
            'mono': bool(t[8]) or None,
            'ansicolor': ansicolor,
            'bgansicolor': bgansicolor,
        }

    def list_styles(cls):
        return list(cls)

    def styles_token(cls, ttype):
        return ttype in cls._styles

    def __iter__(cls):
        for token in cls._styles:
            yield token, cls.style_for_token(token)

    def __len__(cls):
        return len(cls._styles)


class Style(metaclass=StyleMeta):

    #: overall background color (``None`` means transparent)
    background_color = '#ffffff'

    #: highlight background color
    highlight_color = '#ffffcc'

    #: line number font color
    line_number_color = 'inherit'

    #: line number background color
    line_number_background_color = 'transparent'

    #: special line number font color
    line_number_special_color = '#000000'

    #: special line number background color
    line_number_special_background_color = '#ffffc0'

    #: Style definitions for individual token types.
    styles = {}

    #: user-friendly style name (used when selecting the style, so this
    #: should be all-lowercase, with no spaces; use hyphens)
    name = 'unnamed'

    aliases = []

    #: Attribute for styles defined within Pygments. If set
    #: to True, the style is not shown in the style gallery
    #: on the website. This is intended for language-specific
    #: styles.
    web_style_gallery_exclude = False
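
# A minimal sketch (assumed names, not part of the vendored file) of how a
# style is defined by subclassing Style; the color shorthands are expanded
# by colorformat() above:
#
#     from pip._vendor.pygments.style import Style
#     from pip._vendor.pygments.token import Comment, Keyword
#
#     class MyStyle(Style):
#         name = 'my-style'
#         background_color = '#f8f8f8'
#         styles = {
#             Keyword: 'bold #005',
#             Comment: 'italic #888',
#         }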
@@ -0,0 +1,61 @@
"""
    pygments.styles
    ~~~~~~~~~~~~~~~

    Contains built-in styles.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pip._vendor.pygments.plugin import find_plugin_styles
from pip._vendor.pygments.util import ClassNotFound
from pip._vendor.pygments.styles._mapping import STYLES

#: A dictionary of built-in styles, mapping style names to
#: ``'submodule::classname'`` strings.
#: This list is deprecated. Use `pygments.styles.STYLES` instead.
STYLE_MAP = {v[1]: v[0].split('.')[-1] + '::' + k for k, v in STYLES.items()}

#: Internal reverse mapping to make `get_style_by_name` more efficient.
_STYLE_NAME_TO_MODULE_MAP = {v[1]: (v[0], k) for k, v in STYLES.items()}


def get_style_by_name(name):
    """
    Return a style class by its short name. The names of the builtin styles
    are listed in :data:`pygments.styles.STYLE_MAP`.

    Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is
    found.
    """
    if name in _STYLE_NAME_TO_MODULE_MAP:
        mod, cls = _STYLE_NAME_TO_MODULE_MAP[name]
        builtin = "yes"
    else:
        for found_name, style in find_plugin_styles():
            if name == found_name:
                return style
        # perhaps it got dropped into our styles package
        builtin = ""
        mod = 'pygments.styles.' + name
        cls = name.title() + "Style"

    try:
        mod = __import__(mod, None, None, [cls])
    except ImportError:
        raise ClassNotFound(f"Could not find style module {mod!r}" +
                            (builtin and ", though it should be builtin")
                            + ".")
    try:
        return getattr(mod, cls)
    except AttributeError:
        raise ClassNotFound(f"Could not find style class {cls!r} in style module.")


def get_all_styles():
    """Return a generator for all styles by name, both builtin and plugin."""
    for v in STYLES.values():
        yield v[1]
    for name, _ in find_plugin_styles():
        yield name
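
# Usage sketch (hedged; 'monokai' is one of the built-in names listed in
# the STYLES mapping below):
#
#     >>> get_style_by_name('monokai')   # -> the MonokaiStyle class
#     >>> 'monokai' in set(get_all_styles())
#     True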
Binary files not shown.
@@ -0,0 +1,54 @@
# Automatically generated by scripts/gen_mapfiles.py.
# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.

STYLES = {
    'AbapStyle': ('pygments.styles.abap', 'abap', ()),
    'AlgolStyle': ('pygments.styles.algol', 'algol', ()),
    'Algol_NuStyle': ('pygments.styles.algol_nu', 'algol_nu', ()),
    'ArduinoStyle': ('pygments.styles.arduino', 'arduino', ()),
    'AutumnStyle': ('pygments.styles.autumn', 'autumn', ()),
    'BlackWhiteStyle': ('pygments.styles.bw', 'bw', ()),
    'BorlandStyle': ('pygments.styles.borland', 'borland', ()),
    'CoffeeStyle': ('pygments.styles.coffee', 'coffee', ()),
    'ColorfulStyle': ('pygments.styles.colorful', 'colorful', ()),
    'DefaultStyle': ('pygments.styles.default', 'default', ()),
    'DraculaStyle': ('pygments.styles.dracula', 'dracula', ()),
    'EmacsStyle': ('pygments.styles.emacs', 'emacs', ()),
    'FriendlyGrayscaleStyle': ('pygments.styles.friendly_grayscale', 'friendly_grayscale', ()),
    'FriendlyStyle': ('pygments.styles.friendly', 'friendly', ()),
    'FruityStyle': ('pygments.styles.fruity', 'fruity', ()),
    'GhDarkStyle': ('pygments.styles.gh_dark', 'github-dark', ()),
    'GruvboxDarkStyle': ('pygments.styles.gruvbox', 'gruvbox-dark', ()),
    'GruvboxLightStyle': ('pygments.styles.gruvbox', 'gruvbox-light', ()),
    'IgorStyle': ('pygments.styles.igor', 'igor', ()),
    'InkPotStyle': ('pygments.styles.inkpot', 'inkpot', ()),
    'LightbulbStyle': ('pygments.styles.lightbulb', 'lightbulb', ()),
    'LilyPondStyle': ('pygments.styles.lilypond', 'lilypond', ()),
    'LovelaceStyle': ('pygments.styles.lovelace', 'lovelace', ()),
    'ManniStyle': ('pygments.styles.manni', 'manni', ()),
    'MaterialStyle': ('pygments.styles.material', 'material', ()),
    'MonokaiStyle': ('pygments.styles.monokai', 'monokai', ()),
    'MurphyStyle': ('pygments.styles.murphy', 'murphy', ()),
    'NativeStyle': ('pygments.styles.native', 'native', ()),
    'NordDarkerStyle': ('pygments.styles.nord', 'nord-darker', ()),
    'NordStyle': ('pygments.styles.nord', 'nord', ()),
    'OneDarkStyle': ('pygments.styles.onedark', 'one-dark', ()),
    'ParaisoDarkStyle': ('pygments.styles.paraiso_dark', 'paraiso-dark', ()),
    'ParaisoLightStyle': ('pygments.styles.paraiso_light', 'paraiso-light', ()),
    'PastieStyle': ('pygments.styles.pastie', 'pastie', ()),
    'PerldocStyle': ('pygments.styles.perldoc', 'perldoc', ()),
    'RainbowDashStyle': ('pygments.styles.rainbow_dash', 'rainbow_dash', ()),
    'RrtStyle': ('pygments.styles.rrt', 'rrt', ()),
    'SasStyle': ('pygments.styles.sas', 'sas', ()),
    'SolarizedDarkStyle': ('pygments.styles.solarized', 'solarized-dark', ()),
    'SolarizedLightStyle': ('pygments.styles.solarized', 'solarized-light', ()),
    'StarofficeStyle': ('pygments.styles.staroffice', 'staroffice', ()),
    'StataDarkStyle': ('pygments.styles.stata_dark', 'stata-dark', ()),
    'StataLightStyle': ('pygments.styles.stata_light', 'stata-light', ()),
    'TangoStyle': ('pygments.styles.tango', 'tango', ()),
    'TracStyle': ('pygments.styles.trac', 'trac', ()),
    'VimStyle': ('pygments.styles.vim', 'vim', ()),
    'VisualStudioStyle': ('pygments.styles.vs', 'vs', ()),
    'XcodeStyle': ('pygments.styles.xcode', 'xcode', ()),
    'ZenburnStyle': ('pygments.styles.zenburn', 'zenburn', ()),
}
venv/lib/python3.11/site-packages/pip/_vendor/pygments/token.py
@@ -0,0 +1,214 @@
"""
    pygments.token
    ~~~~~~~~~~~~~~

    Basic token types and the standard tokens.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""


class _TokenType(tuple):
    parent = None

    def split(self):
        buf = []
        node = self
        while node is not None:
            buf.append(node)
            node = node.parent
        buf.reverse()
        return buf

    def __init__(self, *args):
        # no need to call super.__init__
        self.subtypes = set()

    def __contains__(self, val):
        return self is val or (
            type(val) is self.__class__ and
            val[:len(self)] == self
        )

    def __getattr__(self, val):
        if not val or not val[0].isupper():
            return tuple.__getattribute__(self, val)
        new = _TokenType(self + (val,))
        setattr(self, val, new)
        self.subtypes.add(new)
        new.parent = self
        return new

    def __repr__(self):
        return 'Token' + (self and '.' or '') + '.'.join(self)

    def __copy__(self):
        # These instances are supposed to be singletons
        return self

    def __deepcopy__(self, memo):
        # These instances are supposed to be singletons
        return self


Token = _TokenType()

# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Escape = Token.Escape
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other

# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment

# Generic types for non-source code
Generic = Token.Generic

# String and some others are not direct children of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number


def is_token_subtype(ttype, other):
    """
    Return True if ``ttype`` is a subtype of ``other``.

    Exists for backwards compatibility. Use ``ttype in other`` now.
    """
    return ttype in other


def string_to_tokentype(s):
    """
    Convert a string into a token type::

        >>> string_to_tokentype('String.Double')
        Token.Literal.String.Double
        >>> string_to_tokentype('Token.Literal.Number')
        Token.Literal.Number
        >>> string_to_tokentype('')
        Token

    Tokens that are already tokens are returned unchanged:

        >>> string_to_tokentype(String)
        Token.Literal.String
    """
    if isinstance(s, _TokenType):
        return s
    if not s:
        return Token
    node = Token
    for item in s.split('.'):
        node = getattr(node, item)
    return node


# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
    Token: '',

    Text: '',
    Whitespace: 'w',
    Escape: 'esc',
    Error: 'err',
    Other: 'x',

    Keyword: 'k',
    Keyword.Constant: 'kc',
    Keyword.Declaration: 'kd',
    Keyword.Namespace: 'kn',
    Keyword.Pseudo: 'kp',
    Keyword.Reserved: 'kr',
    Keyword.Type: 'kt',

    Name: 'n',
    Name.Attribute: 'na',
    Name.Builtin: 'nb',
    Name.Builtin.Pseudo: 'bp',
    Name.Class: 'nc',
    Name.Constant: 'no',
    Name.Decorator: 'nd',
    Name.Entity: 'ni',
    Name.Exception: 'ne',
    Name.Function: 'nf',
    Name.Function.Magic: 'fm',
    Name.Property: 'py',
    Name.Label: 'nl',
    Name.Namespace: 'nn',
    Name.Other: 'nx',
    Name.Tag: 'nt',
    Name.Variable: 'nv',
    Name.Variable.Class: 'vc',
    Name.Variable.Global: 'vg',
    Name.Variable.Instance: 'vi',
    Name.Variable.Magic: 'vm',

    Literal: 'l',
    Literal.Date: 'ld',

    String: 's',
    String.Affix: 'sa',
    String.Backtick: 'sb',
    String.Char: 'sc',
    String.Delimiter: 'dl',
    String.Doc: 'sd',
    String.Double: 's2',
    String.Escape: 'se',
    String.Heredoc: 'sh',
    String.Interpol: 'si',
    String.Other: 'sx',
    String.Regex: 'sr',
    String.Single: 's1',
    String.Symbol: 'ss',

    Number: 'm',
    Number.Bin: 'mb',
    Number.Float: 'mf',
    Number.Hex: 'mh',
    Number.Integer: 'mi',
    Number.Integer.Long: 'il',
    Number.Oct: 'mo',

    Operator: 'o',
    Operator.Word: 'ow',

    Punctuation: 'p',
    Punctuation.Marker: 'pm',

    Comment: 'c',
    Comment.Hashbang: 'ch',
    Comment.Multiline: 'cm',
    Comment.Preproc: 'cp',
    Comment.PreprocFile: 'cpf',
    Comment.Single: 'c1',
    Comment.Special: 'cs',

    Generic: 'g',
    Generic.Deleted: 'gd',
    Generic.Emph: 'ge',
    Generic.Error: 'gr',
    Generic.Heading: 'gh',
    Generic.Inserted: 'gi',
    Generic.Output: 'go',
    Generic.Prompt: 'gp',
    Generic.Strong: 'gs',
    Generic.Subheading: 'gu',
    Generic.EmphStrong: 'ges',
    Generic.Traceback: 'gt',
}
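
# Behaviour sketch (hedged, doctest-style): token types form a hierarchy,
# attribute access creates subtypes on demand, and containment tests
# subtype relationships:
#
#     >>> Name.Function in Name
#     True
#     >>> string_to_tokentype('Comment.Single')
#     Token.Comment.Single
#     >>> STANDARD_TYPES[Comment.Single]
#     'c1'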
File diff suppressed because one or more lines are too long
venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py
@@ -0,0 +1,324 @@
"""
    pygments.util
    ~~~~~~~~~~~~~

    Utility functions.

    :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
from io import TextIOWrapper


split_path_re = re.compile(r'[/\\ ]')
doctype_lookup_re = re.compile(r'''
    <!DOCTYPE\s+(
     [a-zA-Z_][a-zA-Z0-9]*
     (?: \s+      # optional in HTML5
     [a-zA-Z_][a-zA-Z0-9]*\s+
     "[^"]*")?
     )
     [^>]*>
''', re.DOTALL | re.MULTILINE | re.VERBOSE)
tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
                    re.IGNORECASE | re.DOTALL | re.MULTILINE)
xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)


class ClassNotFound(ValueError):
    """Raised if one of the lookup functions didn't find a matching class."""


class OptionError(Exception):
    """
    This exception will be raised by all option processing functions if
    the type or value of the argument is not correct.
    """


def get_choice_opt(options, optname, allowed, default=None, normcase=False):
    """
    If the key `optname` from the dictionary is not in the sequence
    `allowed`, raise an error, otherwise return it.
    """
    string = options.get(optname, default)
    if normcase:
        string = string.lower()
    if string not in allowed:
        raise OptionError('Value for option {} must be one of {}'.format(optname, ', '.join(map(str, allowed))))
    return string


def get_bool_opt(options, optname, default=None):
    """
    Intuitively, this is `options.get(optname, default)`, but restricted to
    Boolean value. The Booleans can be represented as string, in order to accept
    Boolean value from the command line arguments. If the key `optname` is
    present in the dictionary `options` and is not associated with a Boolean,
    raise an `OptionError`. If it is absent, `default` is returned instead.

    The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
    ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
    (matched case-insensitively).
    """
    string = options.get(optname, default)
    if isinstance(string, bool):
        return string
    elif isinstance(string, int):
        return bool(string)
    elif not isinstance(string, str):
        raise OptionError(f'Invalid type {string!r} for option {optname}; use '
                          '1/0, yes/no, true/false, on/off')
    elif string.lower() in ('1', 'yes', 'true', 'on'):
        return True
    elif string.lower() in ('0', 'no', 'false', 'off'):
        return False
    else:
        raise OptionError(f'Invalid value {string!r} for option {optname}; use '
                          '1/0, yes/no, true/false, on/off')


def get_int_opt(options, optname, default=None):
    """As :func:`get_bool_opt`, but interpret the value as an integer."""
    string = options.get(optname, default)
    try:
        return int(string)
    except TypeError:
        raise OptionError(f'Invalid type {string!r} for option {optname}; you '
                          'must give an integer value')
    except ValueError:
        raise OptionError(f'Invalid value {string!r} for option {optname}; you '
                          'must give an integer value')


def get_list_opt(options, optname, default=None):
    """
    If the key `optname` from the dictionary `options` is a string,
    split it at whitespace and return it. If it is already a list
    or a tuple, it is returned as a list.
    """
    val = options.get(optname, default)
    if isinstance(val, str):
        return val.split()
    elif isinstance(val, (list, tuple)):
        return list(val)
    else:
        raise OptionError(f'Invalid type {val!r} for option {optname}; you '
                          'must give a list value')


def docstring_headline(obj):
    if not obj.__doc__:
        return ''
    res = []
    for line in obj.__doc__.strip().splitlines():
        if line.strip():
            res.append(" " + line.strip())
        else:
            break
    return ''.join(res).lstrip()


def make_analysator(f):
    """Return a static text analyser function that returns float values."""
    def text_analyse(text):
        try:
            rv = f(text)
        except Exception:
            return 0.0
        if not rv:
            return 0.0
        try:
            return min(1.0, max(0.0, float(rv)))
        except (ValueError, TypeError):
            return 0.0
    text_analyse.__doc__ = f.__doc__
    return staticmethod(text_analyse)


def shebang_matches(text, regex):
    r"""Check if the given regular expression matches the last part of the
    shebang if one exists.

        >>> from pygments.util import shebang_matches
        >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
        True
        >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
        True
        >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
        False
        >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
        False
        >>> shebang_matches('#!/usr/bin/startsomethingwith python',
        ...                 r'python(2\.\d)?')
        True

    It also checks for common windows executable file extensions::

        >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
        True

    Parameters (``'-f'`` or ``'--foo'`` are ignored so ``'perl'`` does
    the same as ``'perl -e'``)

    Note that this method automatically searches the whole string (eg:
    the regular expression is wrapped in ``'^$'``)
    """
    index = text.find('\n')
    if index >= 0:
        first_line = text[:index].lower()
    else:
        first_line = text.lower()
    if first_line.startswith('#!'):
        try:
            found = [x for x in split_path_re.split(first_line[2:].strip())
                     if x and not x.startswith('-')][-1]
        except IndexError:
            return False
        regex = re.compile(rf'^{regex}(\.(exe|cmd|bat|bin))?$', re.IGNORECASE)
        if regex.search(found) is not None:
            return True
    return False


def doctype_matches(text, regex):
    """Check if the doctype matches a regular expression (if present).

    Note that this method only checks the first part of a DOCTYPE.
    eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
    """
    m = doctype_lookup_re.search(text)
    if m is None:
        return False
    doctype = m.group(1)
    return re.compile(regex, re.I).match(doctype.strip()) is not None


def html_doctype_matches(text):
    """Check if the file looks like it has a html doctype."""
    return doctype_matches(text, r'html')


_looks_like_xml_cache = {}


def looks_like_xml(text):
    """Check if a doctype exists or if we have some tags."""
    if xml_decl_re.match(text):
        return True
    key = hash(text)
    try:
        return _looks_like_xml_cache[key]
    except KeyError:
        m = doctype_lookup_re.search(text)
        if m is not None:
            return True
        rv = tag_re.search(text[:1000]) is not None
        _looks_like_xml_cache[key] = rv
        return rv


def surrogatepair(c):
    """Given a unicode character code with length greater than 16 bits,
    return the two 16 bit surrogate pair.
    """
    # From example D28 of:
    # http://www.unicode.org/book/ch03.pdf
    return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))


def format_lines(var_name, seq, raw=False, indent_level=0):
    """Formats a sequence of strings for output."""
    lines = []
    base_indent = ' ' * indent_level * 4
    inner_indent = ' ' * (indent_level + 1) * 4
    lines.append(base_indent + var_name + ' = (')
    if raw:
        # These should be preformatted reprs of, say, tuples.
        for i in seq:
            lines.append(inner_indent + i + ',')
    else:
        for i in seq:
            # Force use of single quotes
            r = repr(i + '"')
            lines.append(inner_indent + r[:-2] + r[-1] + ',')
    lines.append(base_indent + ')')
    return '\n'.join(lines)


def duplicates_removed(it, already_seen=()):
    """
    Returns a list with duplicates removed from the iterable `it`.

    Order is preserved.
    """
    lst = []
    seen = set()
    for i in it:
        if i in seen or i in already_seen:
            continue
        lst.append(i)
        seen.add(i)
    return lst


class Future:
    """Generic class to defer some work.

    Handled specially in RegexLexerMeta, to support regex string construction at
    first use.
    """
    def get(self):
        raise NotImplementedError


def guess_decode(text):
    """Decode *text* with guessed encoding.

    First try UTF-8; this should fail for non-UTF-8 encodings.
    Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    """
    try:
        text = text.decode('utf-8')
        return text, 'utf-8'
    except UnicodeDecodeError:
        try:
            import locale
            prefencoding = locale.getpreferredencoding()
            text = text.decode(prefencoding)
            return text, prefencoding
        except (UnicodeDecodeError, LookupError):
            text = text.decode('latin1')
            return text, 'latin1'


def guess_decode_from_terminal(text, term):
    """Decode *text* coming from terminal *term*.

    First try the terminal encoding, if given.
    Then try UTF-8. Then try the preferred locale encoding.
    Fall back to latin-1, which always works.
    """
    if getattr(term, 'encoding', None):
        try:
            text = text.decode(term.encoding)
        except UnicodeDecodeError:
            pass
        else:
            return text, term.encoding
    return guess_decode(text)


def terminal_encoding(term):
    """Return our best guess of encoding for the given *term*."""
    if getattr(term, 'encoding', None):
        return term.encoding
    import locale
    return locale.getpreferredencoding()


class UnclosingTextIOWrapper(TextIOWrapper):
    # Don't close underlying buffer on destruction.
    def close(self):
        self.flush()
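
# Spot checks (hedged, doctest-style; the surrogate pair for U+1F600 is
# D83D DE00):
#
#     >>> get_bool_opt({'strip': 'yes'}, 'strip')
#     True
#     >>> tuple(hex(x) for x in surrogatepair(0x1f600))
#     ('0xd83d', '0xde00')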