mirror of https://github.com/BOINC/boinc.git

commit e29a5e4c37
parent 85066d10be

    test_abort & test scripts

    svn path=/trunk/boinc/; revision=1593
@@ -5032,7 +5032,7 @@ John Brian 2003/06/24
         resource.rc
         resource.h
 
-Karl 2003/06/23
+Karl 2003/06/24
     - Got test_1sec working
 
     test/
@@ -5043,11 +5043,21 @@ Karl 2003/06/23
     sched/
         start_servers.C
 
-Karl 2003/06/24
+Karl 2003/06/25
+    - if application exceeds disk space limit, output message "Exceeded size
+      limit" instead of "Couldn't upload files"
+    - test that an aborted result due to file limit causes client_state =
+      RESULT_RESULT_OUTCOME_CLIENT_ERROR (currently this test FAILS!)
+
+    db/
+        boinc_db.h
+    client/
+        client_state.C
+    sched/
+        handle_request.C
+    test/
+        boinc.py
+        test_abort.py (new)
+        test_backend.py
+        test_concat.py
+        test_uc.py
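In concrete terms (an illustrative sketch, not part of the commit; the real check is the ResultAbort class in test_abort.py further down in this diff), the new test asserts that each result row in the project database ends up with the client-error outcome:

    # Hypothetical sketch, reusing names that appear in the diffs below.
    expected = ResultUC()
    expected.client_state = RESULT_OUTCOME_CLIENT_ERROR
    project.check_results(expected)   # every result row must match, including client_state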

test/boinc.py
@@ -13,7 +13,7 @@
 from version import *
 from boinc_db import *
-import os, sys, time, shutil, re, atexit, traceback, random
+import os, sys, glob, time, shutil, re, atexit, traceback, random
 import MySQLdb
 
 errors = 0
 
@@ -193,6 +193,17 @@ def map_xml(dic, keys):
 def generate_shmem_key():
     return '0x1111%x' % random.randrange(0,2**16)
 
+def _check_vars(dict, **names):
+    for key in names:
+        value = names[key]
+        if not key in dict:
+            if value == None:
+                raise SystemExit('error in test script: required parameter "%s" not specified'%key)
+            dict[key] = value
+    for key in dict:
+        if not key in names:
+            raise SystemExit('error in test script: extraneous parameter "%s" unknown'%key)
+
 class STARTS_WITH(str):
     pass
 
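As an aside (not part of the diff), a minimal sketch of how the new _check_vars helper behaves, assuming the definition added above: a keyword whose default is None is required, other keywords fall back to their defaults, and unknown keys are rejected. The argument values here are hypothetical.

    kwargs = {'cushion': 10}
    _check_vars(kwargs, cushion=None, redundancy=2, wu_name='uc_wu')
    # kwargs is now {'cushion': 10, 'redundancy': 2, 'wu_name': 'uc_wu'}

    _check_vars({'redundancy': 5}, cushion=None, redundancy=2)
    # raises SystemExit: required parameter "cushion" not specified

    _check_vars({'cushion': 10, 'bogus': 1}, cushion=None)
    # raises SystemExit: extraneous parameter "bogus" unknown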
@@ -283,7 +294,7 @@ class Project:
     def __init__(self, works, users=None, hosts=None,
                  short_name=None, long_name=None, core_versions=None,
                  apps=None, app_versions=None, appname=None,
-                 resource_share=1,
+                 resource_share=None, redundancy=None,
                  add_to_list=True):
         if add_to_list:
             all_projects.append(self)
@@ -293,7 +304,8 @@ class Project:
         self.db_passwd = ''
         self.generate_keys = False
         self.shmem_key = generate_shmem_key()
-        self.resource_share = resource_share
+        self.resource_share = resource_share or 1
+        self.redundancy = redundancy or 2
         self.output_level = 3
 
         self.master_url = os.path.join(HTML_URL , self.short_name , '')
@@ -566,46 +578,67 @@ class Project:
     def _run_cgi_prog(self, prog, args='', logfile=None):
         verbose_shell_call("cd %s && ./%s %s >> %s.out 2>&1" %
                            (self.dir('cgi'), prog, args, (logfile or prog)))
-    def _run_cgi_onepass(self, prog, args=''):
-        self._run_cgi_prog(prog, '-d 3 -one_pass '+args)
     def start_servers(self):
         self.restart()
         self._run_cgi_prog('start_servers')
         verbose_sleep("Starting servers for project '%s'" % self.short_name, 1)
+        self.read_server_pids()
 
-    def _install_prog(self, prog, args=''):
+    def read_server_pids(self):
+        pid_dir = self.dir('cgi')
+        self.pids = {}
+        for pidfile in glob.glob(os.path.join(pid_dir, '*.pid')):
+            try:
+                pid = int(open(pidfile).readline())
+            except:
+                pid = 0
+            if pid:
+                progname = os.path.split(pidfile)[1].split('.')[0]
+                self.pids[progname] = pid
+
+    def wait_server(self, progname, msg=None):
+        msg = msg or "Waiting for %s to finish..."%progname
+        verbose_echo(1, msg)
+        os.waitpid(self.pids[progname], 0)
+        verbose_echo(1, msg+" done.")
+
+    def _build_cgi_commandlines(self, progname, kwargs):
+        '''Given a KWARGS dictionary build a list of command lines string depending on the program.'''
+        each_app = False
+        if progname == 'feeder':
+            _check_vars(kwargs)
+        elif progname == 'timeout_check':
+            _check_vars(kwargs, app=self.app, nerror=5, ndet=5, nredundancy=5)
+        elif progname == 'make_work':
+            work = kwargs.get('work', self.work)
+            _check_vars(kwargs, cushion=None, redundancy=self.redundancy,
+                        result_template=os.path.realpath(work.result_template),
+                        wu_name=work.wu_template)
+        elif progname == 'validate_test':
+            _check_vars(kwargs, quorum=self.redundancy)
+            each_app = True
+        elif progname == 'file_deleter':
+            _check_vars(kwargs)
+        elif progname == 'assimilator':
+            _check_vars(kwargs)
+            each_app = True
+        else:
+            raise SystemExit("test script error: invalid progname '%s'"%progname)
+        cmdline = ' '.join(map(lambda k: '-%s %s'%(k,kwargs[k]), kwargs.keys()))
+        if each_app:
+            return map(lambda av: '-app %s %s'%(av.app.name,cmdline), self.app_versions)
+        else:
+            return [cmdline]
+
+    def sched_run(self, prog, **kwargs):
+        for cmdline in self._build_cgi_commandlines(prog, kwargs):
+            self._run_cgi_prog(prog, '-d 3 -one_pass '+cmdline)
+    def sched_install(self, prog, **kwargs):
+        for cmdline in self._build_cgi_commandlines(prog, kwargs):
             self.append_config('<start>./%s -d 3 -asynch %s >>%s.out 2>&1</start>' % (
-            prog, args, prog))
-    def install_feeder(self):
-        self._install_prog('feeder')
-    def install_timeout_check(self, app, nerror=5, ndet=5, nredundancy=5):
-        self._install_prog('timeout_check', '-app %s -nerror %d -ndet %d -nredundancy %d' %(
-            app, nerror, ndet, nredundancy))
-    def install_make_work(self, work, cushion, redundancy):
-        self._install_prog('make_work', '-cushion %d -redundancy %d -result_template %s -wu_name %s' %(
-            cushion, redundancy,
-            os.path.realpath(work.result_template),
-            work.wu_template))
-    def uninstall_make_work(self):
-        self.remove_config('make_work')
-    def install_validate(self, app, quorum):
-        for app_version in self.app_versions:
-            self._install_prog('validate_test', '-app %s -quorum %d' %(
-                app_version.app.name, quorum))
-    def validate(self, quorum):
-        for app_version in self.app_versions:
-            self._run_cgi_onepass('validate_test', '-app %s -quorum %d' %(
-                app_version.app.name, quorum))
-    def install_file_delete(self):
-        self._install_prog('file_deleter')
-    def file_delete(self):
-        self._run_cgi_onepass('file_deleter')
-    def install_assimilator(self):
-        for app_version in self.app_versions:
-            self._install_prog('assimilator', '-app %s' % app_version.app.name)
-    def assimilate(self):
-        for app_version in self.app_versions:
-            self._run_cgi_onepass('assimilator', '-app %s' % app_version.app.name)
+                prog, cmdline, prog))
+    def sched_uninstall(self, prog):
+        self.remove_config(prog)
 
     def start_stripcharts(self):
         map(lambda l: self.copy(os.path.join('stripchart', l), 'cgi/'),
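As an aside (not part of the diff), a rough sketch of how a test script drives the new generic helpers, assuming a Project instance from this harness named project; the per-daemon flags are filled in by _build_cgi_commandlines, replacing the old install_*/validate/assimilate wrappers removed above:

    project.sched_install('feeder')                   # appends a <start>./feeder -d 3 -asynch ...</start> config entry
    project.sched_install('validate_test', quorum=5)  # each_app is True, so one entry per app version
    project.sched_run('assimilator')                  # runs once with -d 3 -one_pass, per app version
    project.sched_run('file_deleter')
    project.sched_uninstall('make_work')              # removes the matching config entry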
@@ -654,20 +687,21 @@ class Project:
         self.configlines = filter(lambda l: l.find(pattern)==-1, self.configlines)
         self.write_config()
 
-    def check_results(self, ntarget, matchresult):
+    def check_results(self, matchresult, expected_count=None):
         '''MATCHRESULT should be a dictionary of columns to check, such as:
 
         server_state
         stderr_out
         exit_status
         '''
+        expected_count = expected_count or self.redundancy
         db = self.db_open()
         rows = _db_query(db,"select * from result")
         for row in rows:
             dict_match(matchresult, row)
         db.close()
-        if len(rows) != ntarget:
-            error("expected %d results, but found %d" % (ntarget, len(rows)))
+        if len(rows) != expected_count:
+            error("expected %d results, but found %d" % (expected_count, len(rows)))
 
     def check_files_match(self, result, correct, count=None):
         '''if COUNT is specified then [0,COUNT) is mapped onto the %d in RESULT'''
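For orientation (illustration only), the call-site effect of the check_results change, assuming a Project instance named project: the match dictionary now comes first and the expected row count defaults to the project's redundancy.

    # Old call style:  project.check_results(5, {'server_state': RESULT_SERVER_STATE_OVER})
    project.check_results({'server_state': RESULT_SERVER_STATE_OVER})                    # expects project.redundancy rows
    project.check_results({'server_state': RESULT_SERVER_STATE_OVER}, expected_count=5)  # explicit count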
@@ -775,14 +809,14 @@ class Host:
                                       filename))
 
 class Work:
-    def __init__(self):
+    def __init__(self, redundancy=1):
         self.input_files = []
         self.rsc_iops = 1.8e12
         self.rsc_fpops = 1e13
         self.rsc_memory = 1e7
         self.rsc_disk = 1e7
         self.delay_bound = 1000
-        self.redundancy = 1
+        self.redundancy = redundancy
         self.app = None
 
     def install(self, project):
@@ -841,6 +875,7 @@ class ResultMeter:
 
 def run_check_all():
     '''Run all projects, run all hosts, check all projects, stop all projects.'''
+    atexit.register(all_projects.stop)
    all_projects.run()
    all_projects.open_dbs() # for progress meter
    if os.environ.get('TEST_STOP_BEFORE_HOST_RUN'):

@@ -849,7 +884,7 @@ def run_check_all():
    all_hosts.run()
    rm.stop()
    all_projects.check()
-    all_projects.stop()
+    # all_projects.stop()
 
 proxy_pid = 0
 def start_proxy(code):

test/test_abort.py (new)
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+## $Id$
+
+# Makes sure that the client aborts when the output file size limit is
+# exceeded, and that the server knows it.
+
+from test_uc import *
+
+class WorkAbort(WorkUC):
+    def __init__(self):
+        WorkUC.__init__(self)
+        self.result_template = "abort_result"
+
+class ResultAbort(ResultUC):
+    def __init__(self):
+        ResultUC.__init__(self)
+        self.client_state = RESULT_OUTCOME_CLIENT_ERROR
+
+class HostAbort(Host):
+    def __init__(self):
+        Host.__init__(self)
+        self.defargs += ' -sched_retry_delay_min 1 2>client.err'
+
+class ProjectAbort(ProjectUC):
+    def __init__(self):
+        ProjectUC.__init__(self, works=[WorkAbort()], hosts=[HostAbort()])
+
+    def check(self):
+        # no results should have been uploaded
+        self.check_deleted("upload/uc_wu_%d_0", count=self.redundancy)
+        self.sched_run('validate_test')
+        self.check_results(ResultAbort())
+        self.sched_run('assimilator')
+        self.sched_run('file_deleter')
+
+if __name__ == '__main__':
+    test_msg("result abort mechanism (disk space limit)")
+    ProjectAbort()
+    run_check_all()

test/test_backend.py
@@ -17,7 +17,7 @@ class ProjectBackend(ProjectUC):
         ProjectUC.__init__(self, redundancy = 5, short_name = 'test_backend')
     def run(self):
         self.install()
-        self.install_make_work(work=self.work, cushion=self.num-1, redundancy=5)
+        self.sched_install('make_work', cushion=self.num-1)
         self.start_servers()
 
         # wait for 500 results to be generated
@@ -33,24 +33,19 @@ class ProjectBackend(ProjectUC):
         # Stop the project, deinstall make_work, and install the normal
         # backend components
         self.stop()
-        self.uninstall_make_work()
-        self.install_assimilator()
-        self.install_file_delete()
-        self.install_validate(self.app, quorum=5)
-        self.install_feeder()
-        # self.install_timeout_check(self.app, nerror=5, ndet=5, nredundancy=0)
 
         # TODO: get PID and use wait.
         verbose_echo(1, "Waiting for make_work to finish...")
         while not os.system('pgrep -n make_work >/dev/null'):
             time.sleep(1)
+        self.sched_uninstall('make_work')
+        self.sched_install('assimilator')
+        self.sched_install('file_deleter')
+        self.sched_install('validate_test')
+        self.sched_install('feeder')
+        # self.sched_install('timeout_check', nredundancy=0)
         self.start_servers()
 
     def check(self):
-        # Give the server 30 seconds to finish assimilating/deleting
-        verbose_sleep("Sleeping to allow server to finish", 30)
-        self.check_results(self.num, ResultUC())
+        # TODO: use wait on all processes.
+        verbose_sleep("Sleeping to allow server daemons to finish", 30)
+        self.check_results(ResultUC(), self.num)
 
 if __name__ == '__main__':
     num = sys.argv[1:] and get_int(sys.argv[1]) or 100

test/test_concat.py
@@ -8,35 +8,34 @@ from boinc import *
 class WorkConcat(Work):
     def __init__(self, redundancy=2):
-        Work.__init__(self)
+        Work.__init__(self, redundancy=redundancy)
         self.wu_template = "concat_wu"
         self.result_template = "concat_result"
-        self.redundancy = redundancy
         self.input_files = ['input']*2
 
 class ProjectConcat(Project):
-    def __init__(self, works=None, users=None, hosts=None):
+    def __init__(self, works=None, users=None, hosts=None, redundancy=2):
         Project.__init__(self,
                          appname = 'concat',
                          works = works or [WorkConcat()],
                          users = users,
-                         hosts = hosts)
+                         hosts = hosts,
+                         redundancy=redundancy)
 
     def check(self):
-        redundancy = self.work.redundancy
-        self.validate(redundancy)
+        self.sched_run('validate_test')
         result = {}
         result['server_state'] = RESULT_SERVER_STATE_OVER
-        self.check_results(redundancy, result)
-        self.check_files_match("upload/concat_wu_%d_0", "concat_correct_output", count=redundancy)
-        self.assimilate()
-        self.file_delete()
+        self.check_results(result)
+        self.check_files_match("upload/concat_wu_%d_0", "concat_correct_output", count=self.redundancy)
+        self.sched_run('assimilator')
+        self.sched_run('file_deleter')
         self.check_deleted("download/input")
-        self.check_deleted("upload/concat_wu_%d_0", count=redundancy)
+        self.check_deleted("upload/concat_wu_%d_0", count=self.redundancy)
 
     def run(self):
         self.install()
-        self.install_feeder()
+        self.sched_install('feeder')
         self.start_servers()
 
 if __name__ == '__main__':

test/test_uc.py
@@ -55,22 +55,21 @@ class ProjectUC(Project):
                          users = users or [UserUC()],
                          hosts = hosts,
                          short_name=short_name, long_name=long_name,
-                         resource_share=resource_share
+                         redundancy=redundancy, resource_share=resource_share
                          )
 
     def check(self):
-        redundancy = self.work.redundancy
-        self.validate(redundancy)
-        self.check_results(redundancy, ResultUC())
-        self.check_files_match("upload/uc_wu_%d_0", "uc_correct_output", count=redundancy)
-        self.assimilate()
-        self.file_delete()
+        self.sched_run('validate_test')
+        self.check_results(ResultUC())
+        self.check_files_match("upload/uc_wu_%d_0", "uc_correct_output", count=self.redundancy)
+        self.sched_run('assimilator')
+        self.sched_run('file_deleter')
         self.check_deleted("download/input")
-        self.check_deleted("upload/uc_wu_%d_0", count=redundancy)
+        self.check_deleted("upload/uc_wu_%d_0", count=self.redundancy)
 
     def run(self):
         self.install()
-        self.install_feeder()
+        self.sched_install('feeder')
         self.start_servers()
 
 if __name__ == '__main__':