import asyncio
from peru.async_helpers import run_task
import peru.scope
rewrite the cache to be asynchronous

We've been seeing "Task X took Y seconds" warnings in our tests for a
long time, especially on Windows. Running git commands synchronously
blocks other tasks from running, like display redrawing. That's bad
practice in an async program.

One of the barriers to async-ifying the cache code earlier was that many
commands relied on having exclusive ownership of the index file while
they were running. For example: 1) read a tree into the index, 2) merge
another tree into some subdirectory, 3) write out the result. If any
other git command ran in the middle of that, it would screw up the
result. So if we want cache functions to run in parallel, we need to
rewrite each of them to use its own temporary index file.

The reason I'm finally getting around to this now is that I'm trying to
reduce the number of git commands that run in a no-op sync. One of the
optimizations I want to make is to reuse the index file from the last
sync, so that we don't need a `read-tree` and an `update-index` just to
set us up for `diff-files`. But the plumbing to do that right is pretty
much the same as what we'd need to run every git command with its own
index anyway. So let's bite the bullet and do that now; reusing index
files will be easy afterward.
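To make that concrete, here is a minimal sketch of the approach, not peru's actual cache code: the helper name and error handling are invented for illustration, but GIT_INDEX_FILE and asyncio's subprocess API are the real mechanisms involved. Each git invocation points at its own temporary index and is awaited rather than run synchronously, so other tasks (like display redrawing) keep running.

import asyncio
import os
import tempfile


async def git_with_private_index(repo_dir, *args):
    # Hypothetical helper, not peru's actual API. Each call gets its own
    # throwaway index file, so concurrent read-tree/merge/write-tree
    # sequences can't clobber each other's state.
    with tempfile.TemporaryDirectory() as tmp:
        env = dict(os.environ, GIT_INDEX_FILE=os.path.join(tmp, 'index'))
        proc = await asyncio.create_subprocess_exec(
            'git', *args, cwd=repo_dir, env=env,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
        stdout, stderr = await proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError(stderr.decode())
        return stdout.decode()

Independent commands could then run side by side, e.g. with asyncio.gather(git_with_private_index(repo, 'read-tree', tree_a), git_with_private_index(repo, 'read-tree', tree_b)) (repo, tree_a, and tree_b being placeholder names), without stepping on a shared index.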
import shared
class ScopeTest(shared.PeruTest):
    def test_parse_target(self):
        scope = scope_tree_to_scope({
            'modules': {
                'a': {
                    'modules': {
                        'b': {
                            'modules': {'c': {}},
                            'rules': ['r'],
                        }
                    }
                }
            }
        })
        c, (r,) = run_task(scope.parse_target(DummyRuntime(), 'a.b.c|a.b.r'))
        assert type(c) is DummyModule and c.name == 'a.b.c'
        assert type(r) is DummyRule and r.name == 'a.b.r'


def scope_tree_to_scope(tree, prefix=""):
    '''This function is for generating dummy scope/module/rule hierarchies for
    testing. A scope tree contains a modules dictionary and a rules list, both
    optional. The values of the modules dictionary are themselves scope trees.
    So if module A contains module B and rule R, that's represented as:

        {
            'modules': {
                'A': {
                    'modules': {
                        'B': {},
                    },
                    'rules': ['R'],
                }
            }
        }
    '''
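    # Note on naming: each recursive call below extends `prefix` with
    # peru.scope.SCOPE_SEPARATOR, so nested dummies get fully qualified names.
    # In the docstring's example, module B would be DummyModule('A.B', ...)
    # and rule R would be DummyRule('A.R'), assuming the separator is '.', as
    # the test above implies.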
    modules = {}
    if 'modules' in tree:
        for module_name, sub_tree in tree['modules'].items():
            full_name = prefix + module_name
            new_prefix = full_name + peru.scope.SCOPE_SEPARATOR
            module_scope = scope_tree_to_scope(sub_tree, new_prefix)
            modules[module_name] = DummyModule(full_name, module_scope)
    rules = {}
    if 'rules' in tree:
        for rule_name in tree['rules']:
            full_name = prefix + rule_name
            rules[rule_name] = DummyRule(full_name)
    return peru.scope.Scope(modules, rules)


class DummyModule:
    def __init__(self, name, scope):
        self.name = name
        self.scope = scope

    @asyncio.coroutine
    def parse_peru_file(self, dummy_runtime):
        return self.scope, None


class DummyRule:
    def __init__(self, name):
        self.name = name


class DummyRuntime:
    pass