    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
    make_analysator, Future, guess_decode
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
           'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
           'default', 'words', 'line_re']
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
                 (b'\xff\xfe\0\0', 'utf-32'),
                 (b'\0\0\xfe\xff', 'utf-32be'),
                 (b'\xff\xfe', 'utf-16'),
                 (b'\xfe\xff', 'utf-16be')]
    This metaclass automagically converts ``analyse_text`` methods into
    static methods which always return float values.
    """

    def __new__(mcs, name, bases, d):
        if 'analyse_text' in d:
            d['analyse_text'] = make_analysator(d['analyse_text'])
class Lexer(metaclass=LexerMeta):
    """
    Lexer for a specific language.

    See also :doc:`lexerdevelopment`, a high-level guide to writing
    lexer plugins.
    Lexer classes have attributes used for choosing the most appropriate
    lexer based on various criteria.

    .. autoattribute:: name
    .. autoattribute:: aliases
    .. autoattribute:: filenames
    .. autoattribute:: alias_filenames
    .. autoattribute:: mimetypes
    .. autoattribute:: priority

    Lexers included in Pygments should have an additional attribute:

    .. autoattribute:: url
    You can pass options to the constructor. The basic options recognized
    by all lexers and processed by the base `Lexer` class are:

    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.
    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection). Can also be ``'chardet'`` to use the chardet
        library, if it is installed.
    ``inencoding``
        Overrides the ``encoding`` if given.
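
    For example, these options can be passed straight to a lexer's
    constructor (``PythonLexer`` is used here purely for illustration):

    .. sourcecode:: python

       from pygments.lexers import PythonLexer

       # keep surrounding blank lines and expand tabs to 4 spaces
       lexer = PythonLexer(stripnl=False, tabsize=4)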
        This constructor takes arbitrary options as keyword arguments.
        Every subclass must first process its own options and then call
        the `Lexer` constructor, since it processes the basic
        options like `stripnl`.

        An example looks like this:

        .. sourcecode:: python

           def __init__(self, **options):
               self.compress = options.get('compress', '')
               Lexer.__init__(self, **options)

        As these options must all be specifiable as strings (due to the
        command line usage), there are various utility functions
        available to help with that, see `Utilities`_.
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__
        Add a new stream filter to this lexer.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)
        A static method which is called for lexer guessing.

        It should analyse the text and return a float in the range
        from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
        will not be selected as the most probable one; if it returns
        ``1.0``, it will be selected immediately. This is used by
        `guess_lexer`.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`,
        it's the same as if the return value was ``0.0``.
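
        A minimal sketch of an implementation (the shebang check is just an
        illustrative heuristic, not taken from any particular lexer):

        .. sourcecode:: python

           def analyse_text(text):
               if text.startswith('#!/bin/sh'):
                   return 0.8   # very likely a shell script
               return 0.0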
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
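
        For example (``PythonLexer`` is used purely for illustration):

        .. sourcecode:: python

           from pygments.lexers import PythonLexer

           for tokentype, value in PythonLexer().get_tokens('print(1)\n'):
               print(tokentype, repr(value))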
        """
        if not isinstance(text, str):
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for a BOM first
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
            else:
                text = text.decode(self.encoding)
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]
        def streamer():
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
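
        A minimal (purely illustrative) override, assuming ``Text`` has been
        imported from :mod:`pygments.token`:

        .. sourcecode:: python

           def get_tokens_unprocessed(self, text):
               # emit the whole input as one Text token starting at index 0
               yield 0, Text, text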
        """
        raise NotImplementedError
    This lexer takes two lexers as arguments: a root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
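
    A sketch of the usual pattern (``MyTemplateLexer`` is a placeholder; the
    two delegated lexers are only examples):

    .. sourcecode:: python

       from pygments.lexers import HtmlLexer, PythonLexer

       class MyTemplateLexer(DelegatingLexer):
           def __init__(self, **options):
               super().__init__(HtmlLexer, PythonLexer, **options)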
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):

    def get_tokens_unprocessed(self, text):

        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(buffered))
    Indicates that a state should include rules from another state.
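
    A sketch of typical usage inside a ``tokens`` definition (state names and
    rules are placeholders; token types come from :mod:`pygments.token`):

    .. sourcecode:: python

       tokens = {
           'comments': [
               (r'#.*?$', Comment.Single),
           ],
           'root': [
               include('comments'),
               (r'\w+', Name),
           ],
       }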
    Indicates that a state should inherit from its superclass.
    Indicates a state combined from multiple states.
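
    For example, a rule can push a state combined from two existing states
    (sketch; the state names are placeholders):

    .. sourcecode:: python

       (r'"', String, combined('stringescape', 'stringcontent'))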
    A pseudo match object constructed from a string.
    """

    def group(self, arg=None):
    Callback that yields multiple actions for each group in the match.
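
    For example (assuming the usual imports from :mod:`pygments.token`):

    .. sourcecode:: python

       (r'(def)(\s+)(\w+)', bygroups(Keyword, Whitespace, Name.Function))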
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
    Special singleton used for indicating the caller class.
def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
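
    A sketch of both forms inside a rule list (``JavascriptLexer`` and the
    ``'inline'`` state name are only illustrations):

    .. sourcecode:: python

       (r'<script>.*?</script>', using(JavascriptLexer)),
       (r'\{[^}]*\}', using(this, state='inline')),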
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
    else:
        def callback(lexer, match, ctx=None):
    Indicates a state or state action (e.g. ``#pop``) to apply.
    For example, ``default('#pop')`` is equivalent to ``('', Token, '#pop')``.
    Note that state tuples may be used as well.

    .. versionadded:: 2.0
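
    A sketch of typical usage (the state name and rule are placeholders):

    .. sourcecode:: python

       'optional-sign': [
           (r'[+-]', Operator, '#pop'),
           default('#pop'),    # no sign present: pop without consuming input
       ],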
    Indicates a list of literal words that is transformed into an optimized
    regex that matches any of the words.

    .. versionadded:: 2.0
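
    For example (``Keyword`` comes from :mod:`pygments.token`; the word list
    is a placeholder):

    .. sourcecode:: python

       (words(('if', 'elif', 'else', 'while'), suffix=r'\b'), Keyword)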
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
        """Preprocess the regular expression component of a token definition."""
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            'token type must be simple type or callable, not %r' % (token,)
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                return -int(new_state[5:])
            else:
                assert False, 'unknown new state %r' % new_state
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, 'circular state ref %r' % istate
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, 'unknown new state def %r' % new_state
        """Preprocess a single state definition."""
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue

            assert type(tdef) is tuple, "wrong rule def %r" % tdef

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                                 (tdef[0], state, cls, err)) from err

            new_state = cls._process_new_state(tdef[2],
                                               unprocessed, processed)
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
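
        A sketch of how a subclass can use this (``BaseLexer`` and the rules
        are placeholders; token types come from :mod:`pygments.token`):

        .. sourcecode:: python

           class ExtendedLexer(BaseLexer):
               tokens = {
                   'root': [
                       (r'%%.*?$', Comment.Special),
                       inherit,   # then all of BaseLexer's 'root' rules
                   ],
               }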
        """
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})
            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    tokens[state] = items
                    inheritable[state] = inherit_ndx
                    continue
                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue
                # replace the "inherit" marker with the inherited rules
                curitems[inherit_ndx:inherit_ndx+1] = items
                inheritable[state] = inherit_ndx + new_inh_ndx
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
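
    A minimal sketch of a subclass (the language and its rules are purely
    illustrative; token types come from :mod:`pygments.token`):

    .. sourcecode:: python

       class IniLikeLexer(RegexLexer):
           name = 'IniLike'
           aliases = ['inilike']
           filenames = ['*.inilike']

           tokens = {
               'root': [
                   (r'\s+', Whitespace),
                   (r';.*?$', Comment.Single),
                   (r'\[[^\]]*\]', Keyword),
                   (r'=', Operator),
                   (r'[^=\s]+', Name.Attribute),
                   (r'.', Text),
               ],
           }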
        Split ``text`` into ``(index, tokentype, value)`` tuples.

        ``stack`` is the initial stack (default: ``['root']``).
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # no rule matched: emit an Error token and advance by one char
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text[:70], self.pos, self.stack)
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into ``(index, tokentype, value)`` tuples.
        If ``context`` is given, use this lexer context instead.
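
        Callbacks used with this lexer receive the context as a third argument
        and must advance ``ctx.pos`` themselves. A sketch (``my_callback`` is a
        placeholder name; token types come from :mod:`pygments.token`):

        .. sourcecode:: python

           def my_callback(lexer, match, ctx):
               yield match.start(), Keyword, match.group()
               ctx.pos = match.end()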
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            yield from action(self, m, ctx)
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
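
    A sketch of typical usage, interleaving a prompt token with tokens from a
    sub-lexer (``PythonLexer`` and the input are only illustrations):

    .. sourcecode:: python

       from pygments.lexers import PythonLexer
       from pygments.token import Generic

       insertions = [(0, [(0, Generic.Prompt, '>>> ')])]
       code = PythonLexer().get_tokens_unprocessed('1 + 1\n')
       for index, token, value in do_insertions(insertions, code):
           print(index, token, repr(value))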
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions: yield the original tokens unchanged
        yield from tokens
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            if tmpval:
                yield realpos, t, tmpval
                realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        if oldi < len(v):
            yield realpos, t, v[oldi:]
            realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""
        def match_func(text, pos, endpos=sys.maxsize):
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""
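
    A sketch of how it can be used: write a lexer against this class instead
    of `RegexLexer` (the rules below are placeholders; token types come from
    :mod:`pygments.token`), and a timing table is printed once the token
    stream has been consumed:

    .. sourcecode:: python

       class MyLexer(ProfilingRegexLexer):
           name = 'My'
           tokens = {
               'root': [
                   (r'\w+', Name),
                   (r'\s+', Whitespace),
                   (r'.', Text),
               ],
           }

       list(MyLexer().get_tokens('some input text'))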
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=sort_by, reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)