Update vendor'd Pygments to 2.1.3
123 vendor/pygments/scripts/check_sources.py vendored
@@ -7,20 +7,26 @@
     Make sure each Python file has a correct file header
     including copyright and license information.
 
-    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import sys, os, re
+from __future__ import print_function
+
+import io
+import os
+import re
+import sys
 import getopt
-import cStringIO
 from os.path import join, splitext, abspath
 
 
 checkers = {}
 
 
 def checker(*suffixes, **kwds):
     only_pkg = kwds.pop('only_pkg', False)
 
     def deco(func):
         for suffix in suffixes:
             checkers.setdefault(suffix, []).append(func)
@@ -30,55 +36,42 @@ def checker(*suffixes, **kwds):
 
 
 name_mail_re = r'[\w ]+(<.*?>)?'
-copyright_re = re.compile(r'^    :copyright: Copyright 2006-2013 by '
+copyright_re = re.compile(r'^    :copyright: Copyright 2006-2015 by '
                           r'the Pygments team, see AUTHORS\.$', re.UNICODE)
copyright_2_re = re.compile(r'^                %s(, %s)*[,.]$' %
                             (name_mail_re, name_mail_re), re.UNICODE)
 coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
 not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
 is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
 
 misspellings = ["developement", "adress", "verificate",  # ALLOW-MISSPELLING
-                "informations"]  # ALLOW-MISSPELLING
+                "informations", "unlexer"]  # ALLOW-MISSPELLING
 
 
 @checker('.py')
 def check_syntax(fn, lines):
     if '#!/' in lines[0]:
         lines = lines[1:]
     if 'coding:' in lines[0]:
         lines = lines[1:]
     try:
-        compile(''.join(lines), fn, "exec")
-    except SyntaxError, err:
+        compile('\n'.join(lines), fn, "exec")
+    except SyntaxError as err:
         yield 0, "not compilable: %s" % err
 
 
 @checker('.py')
 def check_style_and_encoding(fn, lines):
-    encoding = 'ascii'
     for lno, line in enumerate(lines):
-        if len(line) > 90:
+        if len(line) > 110:
             yield lno+1, "line too long"
         m = not_ix_re.search(line)
         if m:
             yield lno+1, '"' + m.group() + '"'
         if is_const_re.search(line):
             yield lno+1, 'using == None/True/False'
-        if lno < 2:
-            co = coding_re.search(line)
-            if co:
-                encoding = co.group(1)
-        try:
-            line.decode(encoding)
-        except UnicodeDecodeError, err:
-            yield lno+1, "not decodable: %s\n   Line: %r" % (err, line)
-        except LookupError, err:
-            yield 0, "unknown encoding: %s" % encoding
-            encoding = 'latin1'
 
 
 @checker('.py', only_pkg=True)
 def check_fileheader(fn, lines):
     # line number correction
     c = 1
-    if lines[0:1] == ['#!/usr/bin/env python\n']:
+    if lines[0:1] == ['#!/usr/bin/env python']:
         lines = lines[1:]
         c = 2
 
@@ -87,31 +80,28 @@ def check_fileheader(fn, lines):
     for lno, l in enumerate(lines):
         llist.append(l)
         if lno == 0:
-            if l == '# -*- coding: rot13 -*-\n':
-                # special-case pony package
-                return
-            elif l != '# -*- coding: utf-8 -*-\n':
+            if l != '# -*- coding: utf-8 -*-':
                 yield 1, "missing coding declaration"
         elif lno == 1:
-            if l != '"""\n' and l != 'r"""\n':
+            if l != '"""' and l != 'r"""':
                 yield 2, 'missing docstring begin (""")'
             else:
                 docopen = True
         elif docopen:
-            if l == '"""\n':
+            if l == '"""':
                 # end of docstring
                 if lno <= 4:
                     yield lno+c, "missing module name in docstring"
                 break
 
-            if l != "\n" and l[:4] != '    ' and docopen:
+            if l != "" and l[:4] != '    ' and docopen:
                 yield lno+c, "missing correct docstring indentation"
 
             if lno == 2:
                 # if not in package, don't check the module name
                 modname = fn[:-3].replace('/', '.').replace('.__init__', '')
                 while modname:
-                    if l.lower()[4:-1] == modname:
+                    if l.lower()[4:] == modname:
                         break
                     modname = '.'.join(modname.split('.')[1:])
                 else:
@@ -126,11 +116,11 @@ def check_fileheader(fn, lines):
 
     # check for copyright and license fields
     license = llist[-2:-1]
-    if license != ["    :license: BSD, see LICENSE for details.\n"]:
+    if license != ["    :license: BSD, see LICENSE for details."]:
         yield 0, "no correct license info"
 
     ci = -3
-    copyright = [s.decode('utf-8') for s in llist[ci:ci+1]]
+    copyright = llist[ci:ci+1]
     while copyright and copyright_2_re.match(copyright[0]):
         ci -= 1
         copyright = llist[ci:ci+1]
@@ -138,34 +128,11 @@ def check_fileheader(fn, lines):
         yield 0, "no correct copyright info"
 
 
-@checker('.py', '.html', '.js')
-def check_whitespace_and_spelling(fn, lines):
-    for lno, line in enumerate(lines):
-        if "\t" in line:
-            yield lno+1, "OMG TABS!!!1 "
-        if line[:-1].rstrip(' \t') != line[:-1]:
-            yield lno+1, "trailing whitespace"
-        for word in misspellings:
-            if word in line and 'ALLOW-MISSPELLING' not in line:
-                yield lno+1, '"%s" used' % word
-
-
-bad_tags = ('<b>', '<i>', '<u>', '<s>', '<strike>'
-            '<center>', '<big>', '<small>', '<font')
-
-@checker('.html')
-def check_xhtml(fn, lines):
-    for lno, line in enumerate(lines):
-        for bad_tag in bad_tags:
-            if bad_tag in line:
-                yield lno+1, "used " + bad_tag
 
 
 def main(argv):
     try:
         gopts, args = getopt.getopt(argv[1:], "vi:")
     except getopt.GetoptError:
-        print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
+        print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
         return 2
     opts = {}
     for opt, val in gopts:
@@ -178,30 +145,33 @@ def main(argv):
     elif len(args) == 1:
         path = args[0]
     else:
-        print "Usage: %s [-v] [-i ignorepath]* [path]" % argv[0]
+        print("Usage: %s [-v] [-i ignorepath]* [path]" % argv[0])
         return 2
 
     verbose = '-v' in opts
 
     num = 0
-    out = cStringIO.StringIO()
+    out = io.StringIO()
 
     # TODO: replace os.walk run with iteration over output of
     #       `svn list -R`.
 
     for root, dirs, files in os.walk(path):
         if '.svn' in dirs:
             dirs.remove('.svn')
         if '.hg' in dirs:
             dirs.remove('.hg')
         if 'examplefiles' in dirs:
             dirs.remove('examplefiles')
         if '-i' in opts and abspath(root) in opts['-i']:
             del dirs[:]
             continue
         # XXX: awkward: for the Makefile call: don't check non-package
         #      files for file headers
-        in_pocoo_pkg = root.startswith('./pygments')
+        in_pygments_pkg = root.startswith('./pygments')
         for fn in files:
 
             fn = join(root, fn)
-            if fn[:2] == './': fn = fn[2:]
+            if fn[:2] == './':
+                fn = fn[2:]
 
             if '-i' in opts and abspath(fn) in opts['-i']:
                 continue
@@ -212,29 +182,28 @@ def main(argv):
                 continue
 
             if verbose:
-                print "Checking %s..." % fn
+                print("Checking %s..." % fn)
 
             try:
-                f = open(fn, 'r')
-                lines = list(f)
-            except (IOError, OSError), err:
-                print "%s: cannot open: %s" % (fn, err)
+                lines = open(fn, 'rb').read().decode('utf-8').splitlines()
+            except (IOError, OSError) as err:
+                print("%s: cannot open: %s" % (fn, err))
                 num += 1
                 continue
 
             for checker in checkerlist:
-                if not in_pocoo_pkg and checker.only_pkg:
+                if not in_pygments_pkg and checker.only_pkg:
                     continue
                 for lno, msg in checker(fn, lines):
-                    print >>out, "%s:%d: %s" % (fn, lno, msg)
+                    print(u"%s:%d: %s" % (fn, lno, msg), file=out)
                     num += 1
     if verbose:
-        print
+        print()
     if num == 0:
-        print "No errors found."
+        print("No errors found.")
    else:
-        print out.getvalue().rstrip('\n')
-        print "%d error%s found." % (num, num > 1 and "s" or "")
+        print(out.getvalue().rstrip('\n'))
+        print("%d error%s found." % (num, num > 1 and "s" or ""))
     return int(num > 0)
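Note: the edits to check_sources.py follow the Python 2/3 unification pattern that runs through this whole commit: "except E, err" becomes "except E as err", the print statement becomes the print() function via the __future__ import, cStringIO gives way to io.StringIO, and files are read as bytes and decoded explicitly. A minimal sketch of these idioms working together; the file name and the helper are illustrative, not part of the diff:

    from __future__ import print_function  # a no-op on Python 3

    import io

    def read_source_lines(fn):
        # Read bytes and decode explicitly, as the new check_sources.py
        # does, instead of trusting the platform default encoding.
        with open(fn, 'rb') as f:
            return f.read().decode('utf-8').splitlines()

    try:
        lines = read_source_lines('example.py')   # illustrative path
    except (IOError, OSError) as err:             # 'as' parses on 2.6+ and 3.x
        print("cannot open: %s" % err)
    else:
        out = io.StringIO()                       # unicode-only, like Python 3's str
        for lno, line in enumerate(lines):
            if len(line) > 110:
                print(u"%d: line too long" % (lno + 1), file=out)
        print(out.getvalue(), end='')

Because splitlines() drops the trailing newlines that file iteration used to keep, the checkers' comparisons change in step ('#!/usr/bin/env python' instead of '#!/usr/bin/env python\n', and compile() joins with '\n').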
244 vendor/pygments/scripts/debug_lexer.py vendored Executable file
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+    Lexing error finder
+    ~~~~~~~~~~~~~~~~~~~
+
+    For the source files given on the command line, display
+    the text where Error tokens are being generated, along
+    with some context.
+
+    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+
+# always prefer Pygments from source if exists
+srcpath = os.path.join(os.path.dirname(__file__), '..')
+if os.path.isdir(os.path.join(srcpath, 'pygments')):
+    sys.path.insert(0, srcpath)
+
+
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
+    ProfilingRegexLexer, ProfilingRegexLexerMeta
+from pygments.lexers import get_lexer_by_name, find_lexer_class, \
+    find_lexer_class_for_filename
+from pygments.token import Error, Text, _TokenType
+from pygments.cmdline import _parse_options
+
+
+class DebuggingRegexLexer(ExtendedRegexLexer):
+    """Make the state stack, position and current match instance attributes."""
+
+    def get_tokens_unprocessed(self, text, stack=('root',)):
+        """
+        Split ``text`` into (tokentype, text) pairs.
+
+        ``stack`` is the inital stack (default: ``['root']``)
+        """
+        tokendefs = self._tokens
+        self.ctx = ctx = LexerContext(text, 0)
+        ctx.stack = list(stack)
+        statetokens = tokendefs[ctx.stack[-1]]
+        while 1:
+            for rexmatch, action, new_state in statetokens:
+                self.m = m = rexmatch(text, ctx.pos, ctx.end)
+                if m:
+                    if action is not None:
+                        if type(action) is _TokenType:
+                            yield ctx.pos, action, m.group()
+                            ctx.pos = m.end()
+                        else:
+                            if not isinstance(self, ExtendedRegexLexer):
+                                for item in action(self, m):
+                                    yield item
+                                ctx.pos = m.end()
+                            else:
+                                for item in action(self, m, ctx):
+                                    yield item
+                                if not new_state:
+                                    # altered the state stack?
+                                    statetokens = tokendefs[ctx.stack[-1]]
+                    if new_state is not None:
+                        # state transition
+                        if isinstance(new_state, tuple):
+                            for state in new_state:
+                                if state == '#pop':
+                                    ctx.stack.pop()
+                                elif state == '#push':
+                                    ctx.stack.append(ctx.stack[-1])
+                                else:
+                                    ctx.stack.append(state)
+                        elif isinstance(new_state, int):
+                            # pop
+                            del ctx.stack[new_state:]
+                        elif new_state == '#push':
+                            ctx.stack.append(ctx.stack[-1])
+                        else:
+                            assert False, 'wrong state def: %r' % new_state
+                        statetokens = tokendefs[ctx.stack[-1]]
+                    break
+            else:
+                try:
+                    if ctx.pos >= ctx.end:
+                        break
+                    if text[ctx.pos] == '\n':
+                        # at EOL, reset state to 'root'
+                        ctx.stack = ['root']
+                        statetokens = tokendefs['root']
+                        yield ctx.pos, Text, u'\n'
+                        ctx.pos += 1
+                        continue
+                    yield ctx.pos, Error, text[ctx.pos]
+                    ctx.pos += 1
+                except IndexError:
+                    break
+
+
+def main(fn, lexer=None, options={}):
+    if lexer is not None:
+        lxcls = get_lexer_by_name(lexer).__class__
+    else:
+        lxcls = find_lexer_class_for_filename(os.path.basename(fn))
+        if lxcls is None:
+            name, rest = fn.split('_', 1)
+            lxcls = find_lexer_class(name)
+            if lxcls is None:
+                raise AssertionError('no lexer found for file %r' % fn)
+    debug_lexer = False
+    # if profile:
+    #     # does not work for e.g. ExtendedRegexLexers
+    #     if lxcls.__bases__ == (RegexLexer,):
+    #         # yes we can!  (change the metaclass)
+    #         lxcls.__class__ = ProfilingRegexLexerMeta
+    #         lxcls.__bases__ = (ProfilingRegexLexer,)
+    #         lxcls._prof_sort_index = profsort
+    # else:
+    #     if lxcls.__bases__ == (RegexLexer,):
+    #         lxcls.__bases__ = (DebuggingRegexLexer,)
+    #         debug_lexer = True
+    #     elif lxcls.__bases__ == (DebuggingRegexLexer,):
+    #         # already debugged before
+    #         debug_lexer = True
+    #     else:
+    #         # HACK: ExtendedRegexLexer subclasses will only partially work here.
+    #         lxcls.__bases__ = (DebuggingRegexLexer,)
+    #         debug_lexer = True
+
+    lx = lxcls(**options)
+    lno = 1
+    if fn == '-':
+        text = sys.stdin.read()
+    else:
+        with open(fn, 'rb') as fp:
+            text = fp.read().decode('utf-8')
+    text = text.strip('\n') + '\n'
+    tokens = []
+    states = []
+
+    def show_token(tok, state):
+        reprs = list(map(repr, tok))
+        print('   ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0], end=' ')
+        if debug_lexer:
+            print(' ' + ' ' * (29-len(reprs[0])) + ' : '.join(state) if state else '', end=' ')
+        print()
+
+    for type, val in lx.get_tokens(text):
+        lno += val.count('\n')
+        if type == Error and not ignerror:
+            print('Error parsing', fn, 'on line', lno)
+            if not showall:
+                print('Previous tokens' + (debug_lexer and ' and states' or '') + ':')
+                for i in range(max(len(tokens) - num, 0), len(tokens)):
+                    if debug_lexer:
+                        show_token(tokens[i], states[i])
+                    else:
+                        show_token(tokens[i], None)
+            print('Error token:')
+            l = len(repr(val))
+            print('   ' + repr(val), end=' ')
+            if debug_lexer and hasattr(lx, 'ctx'):
+                print(' ' * (60-l) + ' : '.join(lx.ctx.stack), end=' ')
+            print()
+            print()
+            return 1
+        tokens.append((type, val))
+        if debug_lexer:
+            if hasattr(lx, 'ctx'):
+                states.append(lx.ctx.stack[:])
+            else:
+                states.append(None)
+        if showall:
+            show_token((type, val), states[-1] if debug_lexer else None)
+    return 0
+
+
+def print_help():
+    print('''\
+Pygments development helper to quickly debug lexers.
+
+    scripts/debug_lexer.py [options] file ...
+
+Give one or more filenames to lex them and display possible error tokens
+and/or profiling info.  Files are assumed to be encoded in UTF-8.
+
+Selecting lexer and options:
+
+    -l NAME         use lexer named NAME (default is to guess from
+                    the given filenames)
+    -O OPTIONSTR    use lexer options parsed from OPTIONSTR
+
+Debugging lexing errors:
+
+    -n N            show the last N tokens on error
+    -a              always show all lexed tokens (default is only
+                    to show them when an error occurs)
+    -e              do not stop on error tokens
+
+Profiling:
+
+    -p              use the ProfilingRegexLexer to profile regexes
+                    instead of the debugging lexer
+    -s N            sort profiling output by column N (default is
+                    column 4, the time per call)
+''')
+
+num = 10
+showall = False
+ignerror = False
+lexer = None
+options = {}
+profile = False
+profsort = 4
+
+if __name__ == '__main__':
+    import getopt
+    opts, args = getopt.getopt(sys.argv[1:], 'n:l:aepO:s:h')
+    for opt, val in opts:
+        if opt == '-n':
+            num = int(val)
+        elif opt == '-a':
+            showall = True
+        elif opt == '-e':
+            ignerror = True
+        elif opt == '-l':
+            lexer = val
+        elif opt == '-p':
+            profile = True
+        elif opt == '-s':
+            profsort = int(val)
+        elif opt == '-O':
+            options = _parse_options([val])
+        elif opt == '-h':
+            print_help()
+            sys.exit(0)
+    ret = 0
+    if not args:
+        print_help()
+    for f in args:
+        ret += main(f, lexer, options)
+    sys.exit(bool(ret))
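Note: debug_lexer.py replaces the old find_error.py (deleted further down, and re-added as a symlink). The core of what it automates can be reproduced with public Pygments APIs. A self-contained sketch, with the lexer name and sample input invented for illustration; whether anything is reported depends on how permissive the chosen lexer's rules are:

    from pygments.lexers import get_lexer_by_name
    from pygments.token import Error

    def find_error_tokens(source, lexer_name):
        # Walk the token stream the way debug_lexer.py's main() does and
        # report where Error tokens appear.
        lexer = get_lexer_by_name(lexer_name)
        lno = 1
        for ttype, value in lexer.get_tokens(source):
            if ttype is Error:
                yield lno, value
            lno += value.count('\n')

    for lno, value in find_error_tokens('{ "key": oops }', 'json'):
        print('Error token on line %d: %r' % (lno, value))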
@@ -1,3 +1,4 @@
+from __future__ import print_function
 import sys
 
 from pygments.lexers import get_all_lexers, find_lexer_class
@@ -9,22 +10,22 @@ def main():
     for name, aliases, filenames, mimetypes in get_all_lexers():
         cls = find_lexer_class(name)
         if not cls.aliases:
-            print cls, "has no aliases"
+            print(cls, "has no aliases")
         for f in filenames:
             if f not in uses:
                 uses[f] = []
             uses[f].append(cls)
 
     ret = 0
-    for k, v in uses.iteritems():
+    for k, v in uses.items():
         if len(v) > 1:
             #print "Multiple for", k, v
             for i in v:
                 if i.analyse_text is None:
-                    print i, "has a None analyse_text"
+                    print(i, "has a None analyse_text")
                     ret |= 1
                 elif Lexer.analyse_text.__doc__ == i.analyse_text.__doc__:
-                    print i, "needs analyse_text, multiple lexers for", k
+                    print(i, "needs analyse_text, multiple lexers for", k)
                     ret |= 2
     return ret
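Note: the file-header row for the hunks above was lost in rendering, so the script's name is not shown here; its job is to flag lexers that share a filename pattern but lack a usable analyse_text(). That method is what guess_lexer_for_filename uses to break ties, illustrated here with the classic Perl/Prolog collision on '.pl' (the sample snippets are invented):

    from pygments.lexers import guess_lexer_for_filename

    # Both the Perl and the Prolog lexer claim *.pl, so the file contents
    # are scored with each candidate's analyse_text() to pick a winner.
    print(guess_lexer_for_filename('mystery.pl', '#!/usr/bin/perl\nprint "x";\n'))
    print(guess_lexer_for_filename('mystery.pl', 'parent(tom, bob).\n'))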
205 vendor/pygments/scripts/find_codetags.py vendored
@@ -1,205 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-    Codetags finder
-    ~~~~~~~~~~~~~~~
-
-    Find code tags in specified files and/or directories
-    and create a report in HTML format.
-
-    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-import sys, os, re
-import getopt
-from os.path import join, abspath, isdir, isfile
-
-
-TAGS = set(('XXX', 'TODO', 'FIXME', 'HACK'))
-
-tag_re = re.compile(
-    r'(?P<tag>\b' + r'\b|\b'.join(TAGS) + r'\b)\s*'
-    r'(?: \( (?P<who> .*? ) \) )?'
-    r'\s*:?\s* (?P<what> .*? ) \s* $',
-    re.X)
-
-binary_re = re.compile('[\x00-\x06\x0E-\x1F]')
-
-
-def escape_html(text):
-    return text.replace('&', '&amp;'). \
-                replace('<', '&lt;'). \
-                replace('>', '&gt;'). \
-                replace('"', '&quot;')
-
-def process_file(store, filename):
-    try:
-        f = open(filename, 'r')
-    except (IOError, OSError):
-        return False
-    llmatch = 0
-    try:
-        for lno, line in enumerate(f):
-            # just some random heuristics to filter out binary files
-            if lno < 100 and binary_re.search(line):
-                return False
-            m = tag_re.search(line)
-            if m:
-                store.setdefault(filename, []).append({
-                    'lno':  lno+1,
-                    'tag':  m.group('tag'),
-                    'who':  m.group('who') or '',
-                    'what': escape_html(m.group('what')),
-                })
-                # 'what' cannot start at column 0
-                llmatch = m.start('what')
-            elif llmatch:
-                # continuation lines
-                # XXX: this is Python centric, doesn't work for
-                #      JavaScript, for example.
-                if line[:llmatch].replace('#', '').isspace():
-                    cont = line[llmatch:].strip()
-                    if cont:
-                        store[filename][-1]['what'] += ' ' + escape_html(cont)
-                        continue
-                llmatch = 0
-        return True
-    finally:
-        f.close()
-
-
-def main():
-    try:
-        gopts, args = getopt.getopt(sys.argv[1:], "vo:i:")
-    except getopt.GetoptError:
-        print ("Usage: %s [-v] [-i ignoredir]* [-o reportfile.html] "
-               "path ..." % sys.argv[0])
-        return 2
-    opts = {}
-    for opt, val in gopts:
-        if opt == '-i':
-            val = abspath(val)
-        opts.setdefault(opt, []).append(val)
-
-    if not args:
-        args = ['.']
-
-    if '-o' in opts:
-        output = abspath(opts['-o'][-1])
-    else:
-        output = abspath('tags.html')
-
-    verbose = '-v' in opts
-
-    store = {}
-    gnum = 0
-    num = 0
-
-    for path in args:
-        print "Searching for code tags in %s, please wait." % path
-
-        if isfile(path):
-            gnum += 1
-            if process_file(store, path):
-                if verbose:
-                    print path + ": found %d tags" % \
-                          (path in store and len(store[path]) or 0)
-                num += 1
-            else:
-                if verbose:
-                    print path + ": binary or not readable"
-            continue
-        elif not isdir(path):
-            continue
-
-        for root, dirs, files in os.walk(path):
-            if '-i' in opts and abspath(root) in opts['-i']:
-                del dirs[:]
-                continue
-            if '.svn' in dirs:
-                dirs.remove('.svn')
-            for fn in files:
-                gnum += 1
-                if gnum % 50 == 0 and not verbose:
-                    sys.stdout.write('.')
-                    sys.stdout.flush()
-
-                fn = join(root, fn)
-
-                if fn.endswith('.pyc') or fn.endswith('.pyo'):
-                    continue
-                elif '-i' in opts and abspath(fn) in opts['-i']:
-                    continue
-                elif abspath(fn) == output:
-                    continue
-
-                if fn[:2] == './': fn = fn[2:]
-                if process_file(store, fn):
-                    if verbose:
-                        print fn + ": found %d tags" % \
-                              (fn in store and len(store[fn]) or 0)
-                    num += 1
-                else:
-                    if verbose:
-                        print fn + ": binary or not readable"
-    print
-
-    print "Processed %d of %d files. Found %d tags in %d files." % (
-        num, gnum, sum(len(fitem) for fitem in store.itervalues()), len(store))
-
-    if not store:
-        return 0
-
-    HTML = '''\
-<html>
-<head>
-  <title>Code tags report</title>
-  <style type="text/css">
-    body { font-family: Trebuchet MS,Verdana,sans-serif;
-           width: 80%%; margin-left: auto; margin-right: auto; }
-    table { width: 100%%; border-spacing: 0;
-            border: 1px solid #CCC; }
-    th { font-weight: bold; background-color: #DDD }
-    td { padding: 2px 5px 2px 5px;
-         vertical-align: top; }
-    .tr0 { background-color: #EEEEEE; }
-    .tr1 { background-color: #F6F6F6; }
-    .tag { text-align: center; font-weight: bold; }
-    .tr0 .tag { background-color: #FFEEEE; }
-    .tr1 .tag { background-color: #FFDDDD; }
-    .head { padding-top: 10px; font-size: 100%%; font-weight: bold }
-    .XXX { color: #500; }
-    .FIXME { color: red; }
-    .TODO { color: #880; }
-  </style>
-</head>
-<body>
-<h1>Code tags report for %s</h1>
-<table>
-<tr><th>Line</th><th>Tag</th><th>Who</th><th>Description</th></tr>
-%s
-</table>
-</body>
-</html>
-'''
-
-    TABLE = '\n<tr><td class="head" colspan="4">File: %s</td>\n'
-
-    TR = ('<tr class="tr%d"><td class="lno">%%(lno)d</td>'
-          '<td class="tag %%(tag)s">%%(tag)s</td>'
-          '<td class="who">%%(who)s</td><td class="what">%%(what)s</td></tr>')
-
-    f = file(output, 'w')
-    table = '\n'.join(TABLE % fname +
-                      '\n'.join(TR % (no % 2,) % entry
-                                for no, entry in enumerate(store[fname]))
-                      for fname in sorted(store))
-    f.write(HTML % (', '.join(map(abspath, args)), table))
-    f.close()
-
-    print "Report written to %s." % output
-    return 0
-
-if __name__ == '__main__':
-    sys.exit(main())
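Note: the deleted script's core is a verbose-mode regex (re.X) with named groups that splits "TAG(author): text" comments apart. A standalone sketch of just that part; the sample comment line and author initials are made up:

    import re

    TAGS = ('XXX', 'TODO', 'FIXME', 'HACK')
    tag_re = re.compile(
        r'(?P<tag>\b' + r'\b|\b'.join(TAGS) + r'\b)\s*'
        r'(?: \( (?P<who> .*? ) \) )?'
        r'\s*:?\s* (?P<what> .*? ) \s* $',
        re.X)

    m = tag_re.search('# TODO(mg): escape the report output')
    print(m.group('tag'), '|', m.group('who'), '|', m.group('what'))
    # -> TODO | mg | escape the report output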
170 vendor/pygments/scripts/find_error.py vendored
@@ -1,170 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-    Lexing error finder
-    ~~~~~~~~~~~~~~~~~~~
-
-    For the source files given on the command line, display
-    the text where Error tokens are being generated, along
-    with some context.
-
-    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
-    :license: BSD, see LICENSE for details.
-"""
-
-import sys, os
-
-# always prefer Pygments from source if exists
-srcpath = os.path.join(os.path.dirname(__file__), '..')
-if os.path.isdir(os.path.join(srcpath, 'pygments')):
-    sys.path.insert(0, srcpath)
-
-
-from pygments.lexer import RegexLexer
-from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
-from pygments.token import Error, Text, _TokenType
-from pygments.cmdline import _parse_options
-
-
-class DebuggingRegexLexer(RegexLexer):
-    """Make the state stack, position and current match instance attributes."""
-
-    def get_tokens_unprocessed(self, text, stack=('root',)):
-        """
-        Split ``text`` into (tokentype, text) pairs.
-
-        ``stack`` is the inital stack (default: ``['root']``)
-        """
-        self.pos = 0
-        tokendefs = self._tokens
-        self.statestack = list(stack)
-        statetokens = tokendefs[self.statestack[-1]]
-        while 1:
-            for rexmatch, action, new_state in statetokens:
-                self.m = m = rexmatch(text, self.pos)
-                if m:
-                    if type(action) is _TokenType:
-                        yield self.pos, action, m.group()
-                    else:
-                        for item in action(self, m):
-                            yield item
-                    self.pos = m.end()
-                    if new_state is not None:
-                        # state transition
-                        if isinstance(new_state, tuple):
-                            for state in new_state:
-                                if state == '#pop':
-                                    self.statestack.pop()
-                                elif state == '#push':
-                                    self.statestack.append(self.statestack[-1])
-                                else:
-                                    self.statestack.append(state)
-                        elif isinstance(new_state, int):
-                            # pop
-                            del self.statestack[new_state:]
-                        elif new_state == '#push':
-                            self.statestack.append(self.statestack[-1])
-                        else:
-                            assert False, 'wrong state def: %r' % new_state
-                        statetokens = tokendefs[self.statestack[-1]]
-                    break
-            else:
-                try:
-                    if text[self.pos] == '\n':
-                        # at EOL, reset state to 'root'
-                        self.pos += 1
-                        self.statestack = ['root']
-                        statetokens = tokendefs['root']
-                        yield self.pos, Text, u'\n'
-                        continue
-                    yield self.pos, Error, text[self.pos]
-                    self.pos += 1
-                except IndexError:
-                    break
-
-
-def main(fn, lexer=None, options={}):
-    if lexer is not None:
-        lx = get_lexer_by_name(lexer)
-    else:
-        try:
-            lx = get_lexer_for_filename(os.path.basename(fn), **options)
-        except ValueError:
-            try:
-                name, rest = fn.split('_', 1)
-                lx = get_lexer_by_name(name, **options)
-            except ValueError:
-                raise AssertionError('no lexer found for file %r' % fn)
-    debug_lexer = False
-    # does not work for e.g. ExtendedRegexLexers
-    if lx.__class__.__bases__ == (RegexLexer,):
-        lx.__class__.__bases__ = (DebuggingRegexLexer,)
-        debug_lexer = True
-    elif lx.__class__.__bases__ == (DebuggingRegexLexer,):
-        # already debugged before
-        debug_lexer = True
-    lno = 1
-    text = file(fn, 'U').read()
-    text = text.strip('\n') + '\n'
-    tokens = []
-    states = []
-
-    def show_token(tok, state):
-        reprs = map(repr, tok)
-        print '   ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0],
-        if debug_lexer:
-            print ' ' + ' ' * (29-len(reprs[0])) + repr(state),
-        print
-
-    for type, val in lx.get_tokens(text):
-        lno += val.count('\n')
-        if type == Error:
-            print 'Error parsing', fn, 'on line', lno
-            print 'Previous tokens' + (debug_lexer and ' and states' or '') + ':'
-            if showall:
-                for tok, state in map(None, tokens, states):
-                    show_token(tok, state)
-            else:
-                for i in range(max(len(tokens) - num, 0), len(tokens)):
-                    show_token(tokens[i], states[i])
-            print 'Error token:'
-            l = len(repr(val))
-            print '   ' + repr(val),
-            if debug_lexer and hasattr(lx, 'statestack'):
-                print ' ' * (60-l) + repr(lx.statestack),
-            print
-            print
-            return 1
-        tokens.append((type, val))
-        if debug_lexer:
-            if hasattr(lx, 'statestack'):
-                states.append(lx.statestack[:])
-            else:
-                states.append(None)
-    if showall:
-        for tok, state in map(None, tokens, states):
-            show_token(tok, state)
-    return 0
-
-
-num = 10
-showall = False
-lexer = None
-options = {}
-
-if __name__ == '__main__':
-    import getopt
-    opts, args = getopt.getopt(sys.argv[1:], 'n:l:aO:')
-    for opt, val in opts:
-        if opt == '-n':
-            num = int(val)
-        elif opt == '-a':
-            showall = True
-        elif opt == '-l':
-            lexer = val
-        elif opt == '-O':
-            options = _parse_options([val])
-    ret = 0
-    for f in args:
-        ret += main(f, lexer, options)
-    sys.exit(bool(ret))
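Note: before its deletion in favour of debug_lexer.py, this script relied on rebasing a lexer class at runtime (the lx.__class__.__bases__ assignment above) so an already-constructed lexer instance would pick up the debugging implementation. The trick in isolation, reduced to plain classes with invented names; CPython permits the assignment when the old and new bases have compatible layouts, as they do for RegexLexer subclasses:

    class Base(object):
        def who(self):
            return 'base'

    class Debugging(Base):
        def who(self):
            return 'debugging'

    class Concrete(Base):
        pass

    obj = Concrete()
    Concrete.__bases__ = (Debugging,)   # swap the base class in place
    print(obj.who())                    # -> 'debugging'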
1 vendor/pygments/scripts/find_error.py vendored Symbolic link
@@ -0,0 +1 @@
+debug_lexer.py
43 vendor/pygments/scripts/get_vimkw.py vendored
@@ -1,13 +1,42 @@
+from __future__ import print_function
+
 import re
 from pprint import pprint
 
+from pygments.util import format_lines
+
 r_line = re.compile(r"^(syn keyword vimCommand contained|syn keyword vimOption "
                     r"contained|syn keyword vimAutoEvent contained)\s+(.*)")
 r_item = re.compile(r"(\w+)(?:\[(\w+)\])?")
 
+HEADER = '''\
+# -*- coding: utf-8 -*-
+"""
+    pygments.lexers._vim_builtins
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This file is autogenerated by scripts/get_vimkw.py
+
+    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+# Split up in multiple functions so it's importable by jython, which has a
+# per-method size limit.
+'''
+
+METHOD = '''\
+def _get%(key)s():
+%(body)s
+    return var
+%(key)s = _get%(key)s()
+'''
+
 def getkw(input, output):
     out = file(output, 'w')
 
+    # Copy template from an existing file.
+    print(HEADER, file=out)
+
     output_info = {'command': [], 'option': [], 'auto': []}
     for line in file(input):
         m = r_line.match(line)
@@ -29,15 +58,17 @@ def getkw(input, output):
         output_info['option'].append("('inoremap','inoremap')")
         output_info['option'].append("('vnoremap','vnoremap')")
 
-    for a, b in output_info.items():
-        b.sort()
-        print >>out, '%s=[%s]' % (a, ','.join(b))
+    for key, keywordlist in output_info.items():
+        keywordlist.sort()
+        body = format_lines('var', keywordlist, raw=True, indent_level=1)
+        print(METHOD % locals(), file=out)
 
 def is_keyword(w, keywords):
     for i in range(len(w), 0, -1):
         if w[:i] in keywords:
-            return signals[w[:i]][:len(w)] == w
+            return keywords[w[:i]][:len(w)] == w
     return False
 
 if __name__ == "__main__":
-    getkw("/usr/share/vim/vim73/syntax/vim.vim", "temp.py")
+    getkw("/usr/share/vim/vim74/syntax/vim.vim",
+          "pygments/lexers/_vim_builtins.py")
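Note: the new METHOD template generates one small accessor function per keyword table, which, as the comment in the diff says, keeps each generated method under Jython's per-method bytecode size limit. A toy version of the same code-generation pattern, with invented values and a simplified body (the real script formats the body with pygments.util.format_lines):

    METHOD = '''\
    def _get%(key)s():
        var = %(body)s
        return var
    %(key)s = _get%(key)s()
    '''

    key = 'command'
    body = repr([('abc', 'abclear'), ('abo', 'aboveleft')])
    code = METHOD % {'key': key, 'body': body}
    exec(code)            # defines _getcommand() and command at top level
    print(command[:1])    # -> [('abc', 'abclear')]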
291 vendor/pygments/scripts/reindent.py vendored
@@ -1,291 +0,0 @@
-#! /usr/bin/env python
-
-# Released to the public domain, by Tim Peters, 03 October 2000.
-# -B option added by Georg Brandl, 2006.
-
-"""reindent [-d][-r][-v] [ path ... ]
-
--d (--dryrun)    Dry run.  Analyze, but don't make any changes to files.
--r (--recurse)   Recurse.  Search for all .py files in subdirectories too.
--B (--no-backup) Don't write .bak backup files.
--v (--verbose)   Verbose.  Print informative msgs; else only names of changed files.
--h (--help)      Help.     Print this usage information and exit.
-
-Change Python (.py) files to use 4-space indents and no hard tab characters.
-Also trim excess spaces and tabs from ends of lines, and remove empty lines
-at the end of files.  Also ensure the last line ends with a newline.
-
-If no paths are given on the command line, reindent operates as a filter,
-reading a single source file from standard input and writing the transformed
-source to standard output.  In this case, the -d, -r and -v flags are
-ignored.
-
-You can pass one or more file and/or directory paths.  When a directory
-path, all .py files within the directory will be examined, and, if the -r
-option is given, likewise recursively for subdirectories.
-
-If output is not to standard output, reindent overwrites files in place,
-renaming the originals with a .bak extension.  If it finds nothing to
-change, the file is left alone.  If reindent does change a file, the changed
-file is a fixed-point for future runs (i.e., running reindent on the
-resulting .py file won't change it again).
-
-The hard part of reindenting is figuring out what to do with comment
-lines.  So long as the input files get a clean bill of health from
-tabnanny.py, reindent should do a good job.
-"""
-
-__version__ = "1"
-
-import tokenize
-import os
-import sys
-
-verbose = 0
-recurse = 0
-dryrun = 0
-no_backup = 0
-
-def usage(msg=None):
-    if msg is not None:
-        print >> sys.stderr, msg
-    print >> sys.stderr, __doc__
-
-def errprint(*args):
-    sep = ""
-    for arg in args:
-        sys.stderr.write(sep + str(arg))
-        sep = " "
-    sys.stderr.write("\n")
-
-def main():
-    import getopt
-    global verbose, recurse, dryrun, no_backup
-
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "drvhB",
-                                   ["dryrun", "recurse", "verbose", "help",
-                                    "no-backup"])
-    except getopt.error, msg:
-        usage(msg)
-        return
-    for o, a in opts:
-        if o in ('-d', '--dryrun'):
-            dryrun += 1
-        elif o in ('-r', '--recurse'):
-            recurse += 1
-        elif o in ('-v', '--verbose'):
-            verbose += 1
-        elif o in ('-B', '--no-backup'):
-            no_backup += 1
-        elif o in ('-h', '--help'):
-            usage()
-            return
-    if not args:
-        r = Reindenter(sys.stdin)
-        r.run()
-        r.write(sys.stdout)
-        return
-    for arg in args:
-        check(arg)
-
-def check(file):
-    if os.path.isdir(file) and not os.path.islink(file):
-        if verbose:
-            print "listing directory", file
-        names = os.listdir(file)
-        for name in names:
-            fullname = os.path.join(file, name)
-            if ((recurse and os.path.isdir(fullname) and
-                 not os.path.islink(fullname))
-                or name.lower().endswith(".py")):
-                check(fullname)
-        return
-
-    if verbose:
-        print "checking", file, "...",
-    try:
-        f = open(file)
-    except IOError, msg:
-        errprint("%s: I/O Error: %s" % (file, str(msg)))
-        return
-
-    r = Reindenter(f)
-    f.close()
-    if r.run():
-        if verbose:
-            print "changed."
-            if dryrun:
-                print "But this is a dry run, so leaving it alone."
-        else:
-            print "reindented", file, (dryrun and "(dry run => not really)" or "")
-        if not dryrun:
-            if not no_backup:
-                bak = file + ".bak"
-                if os.path.exists(bak):
-                    os.remove(bak)
-                os.rename(file, bak)
-                if verbose:
-                    print "renamed", file, "to", bak
-            f = open(file, "w")
-            r.write(f)
-            f.close()
-            if verbose:
-                print "wrote new", file
-    else:
-        if verbose:
-            print "unchanged."
-
-
-class Reindenter:
-
-    def __init__(self, f):
-        self.find_stmt = 1  # next token begins a fresh stmt?
-        self.level = 0      # current indent level
-
-        # Raw file lines.
-        self.raw = f.readlines()
-
-        # File lines, rstripped & tab-expanded.  Dummy at start is so
-        # that we can use tokenize's 1-based line numbering easily.
-        # Note that a line is all-blank iff it's "\n".
-        self.lines = [line.rstrip('\n \t').expandtabs() + "\n"
-                      for line in self.raw]
-        self.lines.insert(0, None)
-        self.index = 1  # index into self.lines of next line
-
-        # List of (lineno, indentlevel) pairs, one for each stmt and
-        # comment line.  indentlevel is -1 for comment lines, as a
-        # signal that tokenize doesn't know what to do about them;
-        # indeed, they're our headache!
-        self.stats = []
-
-    def run(self):
-        tokenize.tokenize(self.getline, self.tokeneater)
-        # Remove trailing empty lines.
-        lines = self.lines
-        while lines and lines[-1] == "\n":
-            lines.pop()
-        # Sentinel.
-        stats = self.stats
-        stats.append((len(lines), 0))
-        # Map count of leading spaces to # we want.
-        have2want = {}
-        # Program after transformation.
-        after = self.after = []
-        # Copy over initial empty lines -- there's nothing to do until
-        # we see a line with *something* on it.
-        i = stats[0][0]
-        after.extend(lines[1:i])
-        for i in range(len(stats)-1):
-            thisstmt, thislevel = stats[i]
-            nextstmt = stats[i+1][0]
-            have = getlspace(lines[thisstmt])
-            want = thislevel * 4
-            if want < 0:
-                # A comment line.
-                if have:
-                    # An indented comment line.  If we saw the same
-                    # indentation before, reuse what it most recently
-                    # mapped to.
-                    want = have2want.get(have, -1)
-                    if want < 0:
-                        # Then it probably belongs to the next real stmt.
-                        for j in xrange(i+1, len(stats)-1):
-                            jline, jlevel = stats[j]
-                            if jlevel >= 0:
-                                if have == getlspace(lines[jline]):
-                                    want = jlevel * 4
-                                break
-                    if want < 0:           # Maybe it's a hanging
-                                           # comment like this one,
-                        # in which case we should shift it like its base
-                        # line got shifted.
-                        for j in xrange(i-1, -1, -1):
-                            jline, jlevel = stats[j]
-                            if jlevel >= 0:
-                                want = have + getlspace(after[jline-1]) - \
-                                       getlspace(lines[jline])
-                                break
-                    if want < 0:
-                        # Still no luck -- leave it alone.
-                        want = have
-                else:
-                    want = 0
-            assert want >= 0
-            have2want[have] = want
-            diff = want - have
-            if diff == 0 or have == 0:
-                after.extend(lines[thisstmt:nextstmt])
-            else:
-                for line in lines[thisstmt:nextstmt]:
-                    if diff > 0:
-                        if line == "\n":
-                            after.append(line)
-                        else:
-                            after.append(" " * diff + line)
-                    else:
-                        remove = min(getlspace(line), -diff)
-                        after.append(line[remove:])
-        return self.raw != self.after
-
-    def write(self, f):
-        f.writelines(self.after)
-
-    # Line-getter for tokenize.
-    def getline(self):
-        if self.index >= len(self.lines):
-            line = ""
-        else:
-            line = self.lines[self.index]
-            self.index += 1
-        return line
-
-    # Line-eater for tokenize.
-    def tokeneater(self, type, token, (sline, scol), end, line,
-                   INDENT=tokenize.INDENT,
-                   DEDENT=tokenize.DEDENT,
-                   NEWLINE=tokenize.NEWLINE,
-                   COMMENT=tokenize.COMMENT,
-                   NL=tokenize.NL):
-
-        if type == NEWLINE:
-            # A program statement, or ENDMARKER, will eventually follow,
-            # after some (possibly empty) run of tokens of the form
-            #     (NL | COMMENT)* (INDENT | DEDENT+)?
-            self.find_stmt = 1
-
-        elif type == INDENT:
-            self.find_stmt = 1
-            self.level += 1
-
-        elif type == DEDENT:
-            self.find_stmt = 1
-            self.level -= 1
-
-        elif type == COMMENT:
-            if self.find_stmt:
-                self.stats.append((sline, -1))
-                # but we're still looking for a new stmt, so leave
-                # find_stmt alone
-
-        elif type == NL:
-            pass
-
-        elif self.find_stmt:
-            # This is the first "real token" following a NEWLINE, so it
-            # must be the first token of the next program statement, or an
-            # ENDMARKER.
-            self.find_stmt = 0
-            if line:   # not endmarker
-                self.stats.append((sline, self.level))
-
-# Count number of leading blanks.
-def getlspace(line):
-    i, n = 0, len(line)
-    while i < n and line[i] == " ":
-        i += 1
-    return i
-
-if __name__ == '__main__':
-    main()
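Note: reindent.py was dropped rather than ported because it is Python-2-only syntax. The "(sline, scol)" tuple parameter in tokeneater() above is a SyntaxError on Python 3 (tuple parameters were removed by PEP 3113). A hypothetical py3 port, not part of this commit, would unpack inside the body instead:

    import io
    import tokenize

    def tokeneater(type, token, start, end, line):
        sline, scol = start   # replaces the py2 "(sline, scol)" parameter
        if type == tokenize.COMMENT:
            print('comment at line %d, column %d' % (sline, scol))

    for tok in tokenize.generate_tokens(io.StringIO('x = 1  # hi\n').readline):
        tokeneater(*tok[:5])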
16 vendor/pygments/scripts/vim2pygments.py vendored Normal file → Executable file
@@ -11,10 +11,12 @@
     :license: BSD, see LICENSE for details.
 """
 
+from __future__ import print_function
+
 import sys
 import re
 from os import path
-from cStringIO import StringIO
+from io import StringIO
 
 split_re = re.compile(r'(?<!\\)\s+')
 
@@ -765,7 +767,7 @@ TOKENS = {
 }
 
 TOKEN_TYPES = set()
-for token in TOKENS.itervalues():
+for token in TOKENS.values():
     if not isinstance(token, tuple):
         token = (token,)
     for token in token:
@@ -836,7 +838,7 @@ def find_colors(code):
         colors['Normal']['bgcolor'] = bg_color
 
     color_map = {}
-    for token, styles in colors.iteritems():
+    for token, styles in colors.items():
         if token in TOKENS:
             tmp = []
             if styles.get('noinherit'):
@@ -879,7 +881,7 @@ class StyleWriter(object):
     def write(self, out):
         self.write_header(out)
         default_token, tokens = find_colors(self.code)
-        tokens = tokens.items()
+        tokens = list(tokens.items())
         tokens.sort(lambda a, b: cmp(len(a[0]), len(a[1])))
         bg_color = [x[3:] for x in default_token.split() if x.startswith('bg:')]
         if bg_color:
@@ -916,14 +918,14 @@ def convert(filename, stream=None):
 
 def main():
     if len(sys.argv) != 2 or sys.argv[1] in ('-h', '--help'):
-        print 'Usage: %s <filename.vim>' % sys.argv[0]
+        print('Usage: %s <filename.vim>' % sys.argv[0])
         return 2
     if sys.argv[1] in ('-v', '--version'):
-        print '%s %s' % (SCRIPT_NAME, SCRIPT_VERSION)
+        print('%s %s' % (SCRIPT_NAME, SCRIPT_VERSION))
        return
     filename = sys.argv[1]
     if not (path.exists(filename) and path.isfile(filename)):
-        print 'Error: %s not found' % filename
+        print('Error: %s not found' % filename)
         return 1
     convert(filename, sys.stdout)
     sys.stdout.write('\n')
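Note: the pattern behind most edits in this last file: Python 3 removed dict.iteritems() and dict.itervalues(), and made items()/values() return views, so the portable spelling is items()/values(), wrapped in list() when the result must be sorted or mutated (hence "tokens = list(tokens.items())" above). A small illustration with made-up values; the key-based sort shown here is an aside, since the cmp-style sort on the untouched line in the diff remains py2-only:

    colors = {'Normal': 'white', 'Comment': 'grey'}

    for token, style in colors.items():      # was: colors.iteritems()
        print(token, style)

    tokens = list(colors.items())            # was: tokens = colors.items()
    tokens.sort(key=lambda item: len(item[0]))
    print(tokens)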