# cpython/Lib/tokenize.py
"""Tokenization help for Python programs.
This module exports a function called 'tokenize()' that breaks a stream of
text into Python tokens. It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF) and a "token-eater"
function which is called once for each token found. The latter function is
passed the token type, a string containing the token, the starting and
ending (row, column) coordinates of the token, and the original line. It is
designed to match the working of the Python tokenizer exactly, except that
it produces COMMENT tokens for comments and gives type OP for all operators."""
__version__ = "Ka-Ping Yee, 26 October 1997"

import string, re
from token import *

COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
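# Example usage (an illustrative sketch, not part of the original module):
#
#     import tokenize
#     tokenize.tokenize(open('tokenize.py').readline)
#
# With no token-eater argument, the default 'printtoken' prints one line
# per token: its coordinates, its type name, and its text.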
# Changes from 1.3:
#     Ignore now accepts \f as whitespace.  Operator now includes '**'.
#     Ignore and Special now accept \n or \r\n at the end of a line.
#     Imagnumber is new.  Expfloat is corrected to reject '0e4'.
# Note: to quote a backslash in a regex, it must be doubled in a r'aw' string.
def group(*choices): return '(' + string.join(choices, '|') + ')'
def any(*choices): return apply(group, choices) + '*'
def maybe(*choices): return apply(group, choices) + '?'
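# For instance (added note): group('a', 'b') yields '(a|b)',
# any('a', 'b') yields '(a|b)*', and maybe('a', 'b') yields '(a|b)?'.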
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'[1-9]\d*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'0[jJ]', r'[1-9]\d*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
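# Sample matches (added for clarity): Intnumber matches '0xffL', '0377' and
# '137'; Floatnumber matches '3.14', '.5' and '1e10' but rejects '0e4';
# Imagnumber matches '0j', '10j' and '3.5e4j'.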
Single = any(r"[^'\\]", r'\\.') + "'"
Double = any(r'[^"\\]', r'\\.') + '"'
Single3 = any(r"[^'\\]",r'\\.',r"'[^'\\]",r"'\\.",r"''[^'\\]",r"''\\.") + "'''"
Double3 = any(r'[^"\\]',r'\\.',r'"[^"\\]',r'"\\.',r'""[^"\\]',r'""\\.') + '"""'
Triple = group("[rR]?'''", '[rR]?"""')
String = group("[rR]?'" + any(r"[^\n'\\]", r'\\.') + "'",
               '[rR]?"' + any(r'[^\n"\\]', r'\\.') + '"')
Operator = group('\+', '\-', '\*\*', '\*', '\^', '~', '/', '%', '&', '\|',
                 '<<', '>>', '==', '<=', '<>', '!=', '>=', '=', '<', '>')
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

ContStr = group("[rR]?'" + any(r'\\.', r"[^\n'\\]") + group("'", r'\\\r?\n'),
                '[rR]?"' + any(r'\\.', r'[^\n"\\]') + group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog, 'r': None, 'R': None}
tabsize = 8
TokenError = 'TokenError'
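# Added note: TokenError is a string exception, the prevailing style when
# this module was written; later Python versions use exception classes.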
def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))
def tokenize(readline, tokeneater=printtoken):
    lnum = parenlev = continued = 0
    namechars, numchars = string.letters + '_', string.digits
    contstr, needcont = '', 0
    indents = [0]

    while 1:                                   # loop over lines in stream
        line = readline()
        lnum = lnum + 1
        pos, max = 0, len(line)
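        # Dispatch on tokenizer state (comment added for clarity): either we
        # are inside an unfinished multi-line string, at the start of a new
        # logical line, or in a statement continued by a backslash or by
        # open brackets.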
        if contstr:                            # continued string
            if not line:
                raise TokenError, ("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                tokeneater(STRING, contstr + line[:end],
                           strstart, (lnum, end), line)
                contstr, needcont = '', 0
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                tokeneater(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), line)
                contstr = ''
                continue
            else:
                contstr = contstr + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ': column = column + 1
                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
                elif line[pos] == '\f': column = 0
                else: break
                pos = pos + 1
            if pos == max: break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                tokeneater((NEWLINE, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                tokeneater(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                indents = indents[:-1]
                tokeneater(DEDENT, '', (lnum, pos), (lnum, pos), line)
        else:                                  # continued statement
            if not line:
                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
            continued = 0
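        # Scan the remainder of the line one token at a time with the
        # "pseudo" pattern, which also recognizes comments, backslash
        # continuations and the openings of strings (comment added).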
        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in numchars \
                    or (initial == '.' and token != '.'):  # ordinary number
                    tokeneater(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    tokeneater(NEWLINE, token, spos, epos, line)
                elif initial == '#':
                    tokeneater(COMMENT, token, spos, epos, line)
                elif token in ("'''", '"""',               # triple-quoted
                               "r'''", 'r"""', "R'''", 'R"""'):
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        tokeneater(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        break
                elif initial in ("'", '"') or \
                    token[:2] in ("r'", 'r"', "R'", 'R"'):
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = endprogs[initial] or endprogs[token[1]]
                        contstr, needcont = line[start:], 1
                        break
                    else:                                  # ordinary string
                        tokeneater(STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    tokeneater(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    tokeneater(OP, token, spos, epos, line)
            else:
                tokeneater(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    for indent in indents[1:]:                 # pop remaining indent levels
        tokeneater(DEDENT, '', (lnum, 0), (lnum, 0), '')
    tokeneater(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)
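# To try it from a shell (illustrative): python tokenize.py tokenize.py
# Without a filename argument, it tokenizes standard input instead.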