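# Utility functions for loading language data files: case statistics,
# distribution info, and tokenization special cases.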
import os
from os import path
import codecs
import json


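# Language data lives in a ``data`` directory one level above this module.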
DATA_DIR = path.join(path.dirname(__file__), '..', 'data')


def utf8open(loc, mode='r'):
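    """Open loc in the given mode with UTF-8 encoding."""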
    return codecs.open(loc, mode, 'utf8')


def load_case_stats(data_dir):
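    """Load per-word case statistics from the ``case`` file in data_dir.

    Each line holds a word and two floats; returns word -> (upper, title).
    """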
    case_loc = path.join(data_dir, 'case')
    case_stats = {}
    with utf8open(case_loc) as cases_file:
        for line in cases_file:
            word, upper, title = line.split()
            case_stats[word] = (float(upper), float(title))
    return case_stats


def load_dist_info(lang):
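    """Load distribution_info.json for the given language from DATA_DIR."""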
    loc = path.join(DATA_DIR, lang, 'distribution_info.json')
    with utf8open(loc) as file_:
        dist_info = json.load(file_)
    return dist_info


def read_tokenization(lang):
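    """Read the tokenization special cases for a language.

    Each non-comment, non-empty line holds a chunk followed by the pieces
    it should be split into.
    """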
    loc = path.join(DATA_DIR, lang, 'tokenization')
    entries = []
    seen = set()
    with utf8open(loc) as file_:
        for line in file_:
            line = line.strip()
            if line.startswith('#'):
                continue
            if not line:
                continue
            pieces = line.split()
            chunk = pieces.pop(0)
            assert chunk not in seen, chunk
            seen.add(chunk)
            entries.append((chunk, list(pieces)))
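            # Also register a title-cased variant of the chunk, with the
            # first piece title-cased to match.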
            if chunk[0].isalpha() and chunk[0].islower():
                chunk = chunk[0].title() + chunk[1:]
                pieces[0] = pieces[0][0].title() + pieces[0][1:]
                seen.add(chunk)
                entries.append((chunk, pieces))
    return entries