load_grammar now collects all imports to make before loading them into the namespace

This commit is contained in:
PJCampi 2019-04-12 10:22:25 +02:00
parent b055bc6399
commit ccbaebdc6f
4 changed files with 43 additions and 18 deletions

View File

@ -725,7 +725,7 @@ class GrammarLoader:
rule_defs = [options_from_rule(*x) for x in rule_defs]
# Execute statements
ignore = []
ignore, imports = [], {}
for (stmt,) in statements:
if stmt.data == 'ignore':
t ,= stmt.children
@ -734,22 +734,20 @@ class GrammarLoader:
if len(stmt.children) > 1:
path_node, arg1 = stmt.children
else:
path_node ,= stmt.children
path_node, = stmt.children
arg1 = None
if isinstance(arg1, Tree): # Multi import
dotted_path = path_node.children
dotted_path = tuple(path_node.children)
names = arg1.children
aliases = names # Can't have aliased multi import, so all aliases will be the same as names
aliases = dict(zip(names, names)) # Can't have aliased multi import, so all aliases will be the same as names
else: # Single import
dotted_path = path_node.children[:-1]
names = [path_node.children[-1]] # Get name from dotted path
aliases = [arg1] if arg1 else names # Aliases if exist
grammar_path = os.path.join(*dotted_path) + EXT
dotted_path = tuple(path_node.children[:-1])
name = path_node.children[-1] # Get name from dotted path
aliases = {name: arg1 or name} # Aliases if exist
if path_node.data == 'import_lib': # Import from library
g = import_grammar(grammar_path)
base_paths = []
else: # Relative import
if grammar_name == '<string>': # Import relative to script file path if grammar is coded in script
try:
@ -759,16 +757,16 @@ class GrammarLoader:
else:
base_file = grammar_name # Import relative to grammar file path if external grammar file
if base_file:
base_path = os.path.split(base_file)[0]
base_paths = [os.path.split(base_file)[0]]
else:
base_path = os.path.abspath(os.path.curdir)
g = import_grammar(grammar_path, base_paths=[base_path])
base_paths = [os.path.abspath(os.path.curdir)]
aliases_dict = dict(zip(names, aliases))
new_td, new_rd = import_from_grammar_into_namespace(g, '__'.join(dotted_path), aliases_dict)
term_defs += new_td
rule_defs += new_rd
try:
import_base_paths, import_aliases = imports[dotted_path]
assert base_paths == import_base_paths, 'Inconsistent base_paths for %s.' % '.'.join(dotted_path)
import_aliases.update(aliases)
except KeyError:
imports[dotted_path] = base_paths, aliases
elif stmt.data == 'declare':
for t in stmt.children:
@ -776,6 +774,14 @@ class GrammarLoader:
else:
assert False, stmt
# import grammars
for dotted_path, (base_paths, aliases) in imports.items():
grammar_path = os.path.join(*dotted_path) + EXT
g = import_grammar(grammar_path, base_paths=base_paths)
new_td, new_rd = import_from_grammar_into_namespace(g, '__'.join(dotted_path), aliases)
term_defs += new_td
rule_defs += new_rd
# Verify correctness 1
for name, _ in term_defs:

View File

@ -0,0 +1,7 @@
%import common.INT
a: A
b: A
c: A
A: "A"

View File

@ -1109,6 +1109,13 @@ def _make_parser_test(LEXER, PARSER):
x = l.parse('N')
self.assertEqual(next(x.find_data('rule_to_import')).children, ['N'])
def test_relative_import_rules_dependencies_imported_only_once(self):
l = _Lark_open("test_relative_import_rules_dependencies_imported_only_once.lark", rel_to=__file__)
x = l.parse('AAA')
self.assertEqual(next(x.find_data('a')).children, ['A'])
self.assertEqual(next(x.find_data('b')).children, ['A'])
self.assertEqual(next(x.find_data('d')).children, ['A'])
def test_import_errors(self):
grammar = """
start: NUMBER WORD

View File

@ -0,0 +1,5 @@
%import .grammars.three_rules_using_same_token.a
%import .grammars.three_rules_using_same_token.b
%import .grammars.three_rules_using_same_token.c -> d
start: a b d