random send order

svn path=/trunk/boinc/; revision=1121
This commit is contained in:
David Anderson 2003-04-01 03:28:37 +00:00
parent f7857fc386
commit 8a3e89f698
21 changed files with 209 additions and 137 deletions

View File

@ -3967,3 +3967,39 @@ David Mar 31 2003
team_create_action.php
test/
test_uc.php
David Mar 31 2003
- added random field to result.
Results are sent in random order, making it hard for hackers
to get multiple results for the same WU
- user-visible messages about files give names, not URLs
- Changed all occurrences of "time test" and "speed test"
to "CPU benchmark".
Please use accurate, consistent terminology in the code.
- changed command-line options:
-skip_cpu_benchmarks instead of -no_time_test
-run_cpu_benchmarks instead of -run_speed_test
client/
client_state.C,h
file_names.h
hostinfo.C,h
net_xfer.C
pers_file_xfer.C
speed_stats.h
db/
constraints.sql
db.h
db_mysql.C
schema.sql
html_user/
prefs.inc
user.inc
sched/
make_work.C
test/
test_1sec.php
test_backend.php
test_uc.php
tools/
backend_lib.C

View File

@ -437,6 +437,7 @@ int ACTIVE_TASK_SET::insert(ACTIVE_TASK* atp) {
return 0;
}
#if 0
void ACTIVE_TASK_SET::free_mem() {
vector<ACTIVE_TASK*>::iterator at_iter;
ACTIVE_TASK *at;
@ -448,6 +449,7 @@ void ACTIVE_TASK_SET::free_mem() {
delete at;
}
}
#endif
// Checks if any child processes have exited and records their final CPU time
//

View File

@ -67,7 +67,8 @@ CLIENT_STATE::CLIENT_STATE() {
client_state_dirty = false;
exit_when_idle = false;
update_prefs = false;
run_time_test = true;
run_cpu_benchmarks = false;
skip_cpu_benchmarks = false;
file_xfer_giveup_period = PERS_GIVEUP;
contacted_sched_server = false;
activities_suspended = false;
@ -86,14 +87,16 @@ CLIENT_STATE::CLIENT_STATE() {
strcpy(socks_user_passwd, "");
strcpy(host_venue, "");
suspend_requested = false;
run_speed_test = false;
start_saver = false;
#ifdef _WIN32
time_tests_handle = NULL;
cpu_benchmarks_handle = NULL;
#endif
time_tests_id = 0;
cpu_benchmarks_id = 0;
}
#if 0
// what's the purpose of this?
void CLIENT_STATE::free_mem() {
vector<PROJECT*>::iterator proj_iter;
vector<APP*>::iterator app_iter;
@ -152,6 +155,7 @@ void CLIENT_STATE::free_mem() {
active_tasks.free_mem();
}
#endif
void CLIENT_STATE::install_global_prefs() {
net_xfers->max_bytes_sec_up = global_prefs.max_bytes_sec_up;
@ -211,17 +215,17 @@ int CLIENT_STATE::init() {
// running CPU benchmarks is slow, so do it infrequently
//
if (gstate.should_run_time_tests()) {
time_tests_start = time(0);
if (gstate.should_run_cpu_benchmarks()) {
cpu_benchmarks_start = time(0);
show_message(NULL, "Running CPU benchmarks", MSG_INFO);
#ifdef _WIN32
time_tests_handle = CreateThread(
NULL, 0, win_time_tests, NULL, 0, &time_tests_id
cpu_benchmarks_handle = CreateThread(
NULL, 0, win_cpu_benchmarks, NULL, 0, &cpu_benchmarks_id
);
#else
time_tests_id = fork();
if (time_tests_id == 0) {
_exit(time_tests());
cpu_benchmarks_id = fork();
if (cpu_benchmarks_id == 0) {
_exit(cpu_benchmarks());
}
#endif
}
@ -257,19 +261,19 @@ int CLIENT_STATE::init() {
return 0;
}
// Returns true if time tests should be run
// flag or if it's been a month since we last checked time stats
// Returns true if CPU benchmarks should be run:
// flag is set or it's been a month since we last ran
//
bool CLIENT_STATE::should_run_time_tests() {
bool CLIENT_STATE::should_run_cpu_benchmarks() {
return (
run_speed_test ||
run_cpu_benchmarks ||
(difftime(time(0), (time_t)host_info.p_calculated) > BENCHMARK_PERIOD)
);
}
#ifdef _WIN32
DWORD WINAPI CLIENT_STATE::win_time_tests(LPVOID) {
return gstate.time_tests();
DWORD WINAPI CLIENT_STATE::win_cpu_benchmarks(LPVOID) {
return gstate.cpu_benchmarks();
}
#endif
@ -277,7 +281,7 @@ DWORD WINAPI CLIENT_STATE::win_time_tests(LPVOID) {
// NOTE: this locks up the process for 10-20 seconds,
// so it should be called very seldom
//
int CLIENT_STATE::time_tests() {
int CLIENT_STATE::cpu_benchmarks() {
HOST_INFO host_info;
FILE* finfo;
double fpop_test_secs = 3.3;
@ -286,9 +290,17 @@ int CLIENT_STATE::time_tests() {
clear_host_info(host_info);
if (log_flags.measurement_debug) {
printf("Running time tests.\n");
printf("Running CPU benchmarks.\n");
}
if (run_time_test) {
if (skip_cpu_benchmarks) {
if (log_flags.measurement_debug) {
show_message(0, "Skipping CPU benchmarks\n", MSG_INFO);
}
host_info.p_fpops = 1e9;
host_info.p_iops = 1e9;
host_info.p_membw = 4e9;
host_info.m_cache = 1e6;
} else {
if (log_flags.measurement_debug) {
printf(
"Running floating point test for about %.1f seconds.\n",
@ -315,85 +327,77 @@ int CLIENT_STATE::time_tests() {
// need to check cache!!
host_info.m_cache = 1e6;
} else {
if (log_flags.measurement_debug) {
printf("Using fake performance numbers\n");
}
host_info.p_fpops = 1e9;
host_info.p_iops = 1e9;
host_info.p_membw = 4e9;
host_info.m_cache = 1e6;
}
host_info.p_calculated = (double)time(0);
finfo = fopen(TIME_TESTS_FILE_NAME, "w");
finfo = fopen(CPU_BENCHMARKS_FILE_NAME, "w");
if(!finfo) return ERR_FOPEN;
host_info.write_time_tests(finfo);
host_info.write_cpu_benchmarks(finfo);
fclose(finfo);
return 0;
}
// checks if the time tests are running
// checks if the CPU benchmarks are running
//
int CLIENT_STATE::check_time_tests() {
int CLIENT_STATE::check_cpu_benchmarks() {
FILE* finfo;
int retval;
if (time_tests_id) {
if (cpu_benchmarks_id) {
#ifdef _WIN32
DWORD exit_code = 0;
GetExitCodeThread(time_tests_handle, &exit_code);
GetExitCodeThread(cpu_benchmarks_handle, &exit_code);
if(exit_code == STILL_ACTIVE) {
if(time(NULL) > time_tests_start + MAX_TIME_TESTS_SECONDS) {
if(time(NULL) > cpu_benchmarks_start + MAX_CPU_BENCHMARKS_SECONDS) {
show_message(NULL, "CPU benchmarks timed out, using default values", MSG_ERROR);
TerminateThread(time_tests_handle, 0);
CloseHandle(time_tests_handle);
TerminateThread(cpu_benchmarks_handle, 0);
CloseHandle(cpu_benchmarks_handle);
host_info.p_fpops = 1e9;
host_info.p_iops = 1e9;
host_info.p_membw = 4e9;
host_info.m_cache = 1e6;
time_tests_id = 0;
return TIME_TESTS_ERROR;
cpu_benchmarks_id = 0;
return CPU_BENCHMARKS_ERROR;
}
return TIME_TESTS_RUNNING;
return CPU_BENCHMARKS_RUNNING;
}
CloseHandle(time_tests_handle);
CloseHandle(cpu_benchmarks_handle);
#else
int exit_code = 0;
retval = waitpid(time_tests_id, &exit_code, WNOHANG);
retval = waitpid(cpu_benchmarks_id, &exit_code, WNOHANG);
if(retval == 0) {
if((unsigned int)time(NULL) > time_tests_start + MAX_TIME_TESTS_SECONDS) {
if((unsigned int)time(NULL) > cpu_benchmarks_start + MAX_CPU_BENCHMARKS_SECONDS) {
show_message(NULL, "CPU benchmarks timed out, using default values", MSG_ERROR);
kill(time_tests_id, SIGKILL);
kill(cpu_benchmarks_id, SIGKILL);
host_info.p_fpops = 1e9;
host_info.p_iops = 1e9;
host_info.p_membw = 4e9;
host_info.m_cache = 1e6;
time_tests_id = 0;
return TIME_TESTS_ERROR;
cpu_benchmarks_id = 0;
return CPU_BENCHMARKS_ERROR;
}
return TIME_TESTS_RUNNING;
return CPU_BENCHMARKS_RUNNING;
}
#endif
time_tests_id = 0;
cpu_benchmarks_id = 0;
show_message(NULL, "CPU benchmarks complete", MSG_INFO);
finfo = fopen(TIME_TESTS_FILE_NAME, "r");
finfo = fopen(CPU_BENCHMARKS_FILE_NAME, "r");
if (!finfo) {
show_message(NULL, "Can't open CPU benchmark file, using default values", MSG_ERROR);
host_info.p_fpops = 1e9;
host_info.p_iops = 1e9;
host_info.p_membw = 4e9;
host_info.m_cache = 1e6;
return TIME_TESTS_ERROR;
return CPU_BENCHMARKS_ERROR;
}
retval = host_info.parse_time_tests(finfo);
retval = host_info.parse_cpu_benchmarks(finfo);
fclose(finfo);
if (retval) return TIME_TESTS_ERROR;
file_delete(TIME_TESTS_FILE_NAME);
return TIME_TESTS_COMPLETE;
if (retval) return CPU_BENCHMARKS_ERROR;
file_delete(CPU_BENCHMARKS_FILE_NAME);
return CPU_BENCHMARKS_COMPLETE;
}
return TIME_TESTS_NOT_RUNNING;
return CPU_BENCHMARKS_NOT_RUNNING;
}
// Return the maximum allowed disk usage as determined by user preferences.
@ -471,11 +475,12 @@ int CLIENT_STATE::check_suspend_activities() {
sprintf(susp_msg, "Suspending activity - time of day");
}
}
// Stop the applications while we're running time tests
// Don't work while we're running CPU benchmarks
//
if (check_time_tests() == TIME_TESTS_RUNNING) {
if (check_cpu_benchmarks() == CPU_BENCHMARKS_RUNNING) {
should_suspend = true;
sprintf(susp_msg, "Suspending activity - running time tests");
sprintf(susp_msg, "Suspending activity - running CPU benchmarks");
}
if (should_suspend) {
@ -520,7 +525,7 @@ bool CLIENT_STATE::do_something() {
check_suspend_activities();
if (check_time_tests() == TIME_TESTS_RUNNING) return false;
if (check_cpu_benchmarks() == CPU_BENCHMARKS_RUNNING) return false;
print_log("Polling; active layers:\n");
net_stats.poll(*net_xfers);
@ -706,10 +711,10 @@ int CLIENT_STATE::parse_state_file() {
done:
fclose(f);
// This was for updating speed stats on the beta
// This was for updating CPU benchmarks on the beta
// test, it can be taken out eventually,
if (old_major_vers <= 0 && old_minor_vers <= 16) {
run_speed_test = true;
run_cpu_benchmarks = true;
}
return retval;
}
@ -1282,8 +1287,8 @@ void CLIENT_STATE::parse_cmdline(int argc, char** argv) {
for (i=1; i<argc; i++) {
if (!strcmp(argv[i], "-exit_when_idle")) {
exit_when_idle = true;
} else if (!strcmp(argv[i], "-no_time_test")) {
run_time_test = false;
} else if (!strcmp(argv[i], "-skip_cpu_benchmarks")) {
skip_cpu_benchmarks = true;
} else if (!strcmp(argv[i], "-exit_after_app_start")) {
exit_after_app_start_secs = atoi(argv[++i]);
} else if (!strcmp(argv[i], "-file_xfer_giveup_period")) {
@ -1299,8 +1304,8 @@ void CLIENT_STATE::parse_cmdline(int argc, char** argv) {
} else if (!strcmp(argv[i], "-update_prefs")) {
update_prefs = true;
} else if (!strcmp(argv[i], "-run_speed_test")) {
run_speed_test = true;
} else if (!strcmp(argv[i], "-run_cpu_benchmarks")) {
run_cpu_benchmarks = true;
} else if (!strcmp(argv[i], "-add_new_project")) {
add_new_project();
} else if (!strcmp(argv[i], "-version")) {
@ -1312,7 +1317,7 @@ void CLIENT_STATE::parse_cmdline(int argc, char** argv) {
" -version show version info\n"
" -add_new_project add project (will prompt for URL, account key)\n"
" -update_prefs contact all projects to update preferences\n"
" -run_speed_test run the speed benchmark routines\n",
" -run_cpu_benchmarks run the CPU benchmarks\n",
argv[0]
);
exit(0);

View File

@ -74,17 +74,17 @@ public:
void parse_cmdline(int argc, char** argv);
void parse_env_vars();
bool time_to_exit();
bool should_run_time_tests();
int time_tests();
bool should_run_cpu_benchmarks();
int cpu_benchmarks();
#ifdef _WIN32
static DWORD WINAPI win_time_tests(LPVOID);
HANDLE time_tests_handle;
DWORD time_tests_id;
static DWORD WINAPI win_cpu_benchmarks(LPVOID);
HANDLE cpu_benchmarks_handle;
DWORD cpu_benchmarks_id;
#else
PROCESS_ID time_tests_id;
PROCESS_ID cpu_benchmarks_id;
#endif
unsigned int time_tests_start;
int check_time_tests();
unsigned int cpu_benchmarks_start;
int check_cpu_benchmarks();
int project_disk_usage(PROJECT*, double&);
int current_disk_usage(double&);
// returns the total disk usage of BOINC on this host
@ -92,7 +92,6 @@ public:
int file_xfer_giveup_period;
bool user_idle;
bool suspend_requested;
bool run_speed_test;
bool update_prefs;
bool start_saver;
bool exit_when_idle;
@ -114,7 +113,10 @@ private:
int core_client_minor_version;
char* platform_name;
int nslots;
bool run_time_test;
bool skip_cpu_benchmarks;
// if set, use hardwired numbers rather than running benchmarks
bool run_cpu_benchmarks;
// if set, run benchmarks on client startup
bool activities_suspended;
int exit_after_app_start_secs;
// if nonzero, exit this many seconds after starting an app

View File

@ -36,20 +36,20 @@ extern void get_account_filename(char* master_url, char* path);
extern bool is_account_file(char*);
extern void escape_project_url(char *in, char* out);
#define PROJECTS_DIR "projects"
#define SLOTS_DIR "slots"
#define STATE_FILE_TEMP "state_file_temp.xml"
#define STATE_FILE_NAME "client_state.xml"
#define GLOBAL_PREFS_FILE_NAME "global_prefs.xml"
#define MASTER_FILE_NAME "master.html"
#define SCHED_OP_REQUEST_FILE "sched_request.xml"
#define SCHED_OP_RESULT_FILE "sched_reply.xml"
#define LOG_FLAGS_FILE "log_flags.xml"
#define TEMP_FILE_NAME "temp.xml"
#define STDERR_FILE_NAME "stderr.txt"
#define STDOUT_FILE_NAME "stdout.txt"
#define TIME_TESTS_FILE_NAME "time_tests.xml"
#define LOCK_FILE_NAME "lockfile"
#define INI_FILE_NAME "boinc.ini"
#define LANGUAGE_FILE_NAME "language.ini"
#define LIST_STATE_FILE_NAME "list.ini"
#define PROJECTS_DIR "projects"
#define SLOTS_DIR "slots"
#define STATE_FILE_TEMP "state_file_temp.xml"
#define STATE_FILE_NAME "client_state.xml"
#define GLOBAL_PREFS_FILE_NAME "global_prefs.xml"
#define MASTER_FILE_NAME "master.html"
#define SCHED_OP_REQUEST_FILE "sched_request.xml"
#define SCHED_OP_RESULT_FILE "sched_reply.xml"
#define LOG_FLAGS_FILE "log_flags.xml"
#define TEMP_FILE_NAME "temp.xml"
#define STDERR_FILE_NAME "stderr.txt"
#define STDOUT_FILE_NAME "stdout.txt"
#define CPU_BENCHMARKS_FILE_NAME "cpu_benchmarks.xml"
#define LOCK_FILE_NAME "lockfile"
#define INI_FILE_NAME "boinc.ini"
#define LANGUAGE_FILE_NAME "language.ini"
#define LIST_STATE_FILE_NAME "list.ini"

View File

@ -150,17 +150,17 @@ int HOST_INFO::write(FILE* out) {
return 0;
}
// Parse the time tests for host information
// note that unlike parse this checks for the opening tag
// and does not clear the memory of this struct
// CPU benchmarks are run in a separate process,
// which communicates its result via a file.
// The following functions read and write this file.
//
int HOST_INFO::parse_time_tests(FILE* in) {
int HOST_INFO::parse_cpu_benchmarks(FILE* in) {
char buf[256];
fgets(buf, 256, in);
while (fgets(buf, 256, in)) {
if (match_tag(buf, "<time_tests>"));
else if (match_tag(buf, "</time_tests>")) return 0;
if (match_tag(buf, "<cpu_benchmarks>"));
else if (match_tag(buf, "</cpu_benchmarks>")) return 0;
else if (parse_double(buf, "<p_fpops>", p_fpops)) continue;
else if (parse_double(buf, "<p_iops>", p_iops)) continue;
else if (parse_double(buf, "<p_membw>", p_membw)) continue;
@ -171,17 +171,15 @@ int HOST_INFO::parse_time_tests(FILE* in) {
return 0;
}
// Write the time tests for the host information
//
int HOST_INFO::write_time_tests(FILE* out) {
int HOST_INFO::write_cpu_benchmarks(FILE* out) {
fprintf(out,
"<time_tests>\n"
"<cpu_benchmarks>\n"
" <p_fpops>%f</p_fpops>\n"
" <p_iops>%f</p_iops>\n"
" <p_membw>%f</p_membw>\n"
" <p_calculated>%f</p_calculated>\n"
" <m_cache>%f</m_cache>\n"
"</time_tests>\n",
"</cpu_benchmarks>\n",
p_fpops,
p_iops,
p_membw,

View File

@ -55,8 +55,8 @@ struct HOST_INFO {
int parse(FILE*);
int write(FILE*);
int parse_time_tests(FILE*);
int write_time_tests(FILE*);
int parse_cpu_benchmarks(FILE*);
int write_cpu_benchmarks(FILE*);
};
extern bool host_is_running_on_batteries();

View File

@ -116,8 +116,15 @@ bool PERS_FILE_XFER::start_xfer() {
}
if (log_flags.file_xfer) {
sprintf(buf,
"Started %s of %s to %s",
(is_upload ? "upload" : "download"), fip->name, fip->get_url()
"Started %s of %s",
(is_upload ? "upload" : "download"), fip->name
);
show_message(fip->project, buf, MSG_INFO);
}
if (log_flags.file_xfer_debug) {
sprintf(buf,
"URL: %s",
fip->get_url()
);
show_message(fip->project, buf, MSG_INFO);
}
@ -160,8 +167,16 @@ bool PERS_FILE_XFER::poll(time_t now) {
if (log_flags.file_xfer) {
sprintf(
buf,
"File transfer done for %s; error code %d",
fip->get_url(), fxp->file_xfer_retval
"Finished %s of %s",
is_upload?"upload":"download", fip->name
);
show_message(fip->project, buf, MSG_INFO);
}
if (log_flags.file_xfer_debug) {
sprintf(
buf,
"file transfer status %d",
fxp->file_xfer_retval
);
show_message(fip->project, buf, MSG_INFO);
}

View File

@ -10,18 +10,18 @@
#define NUM_DOUBLES 28
#define NUM_INTS 28
#define CACHE_MIN 1024 /* smallest cache (in words) */
#define CACHE_MAX 512*1024 /* largest cache */
#define STRIDE_MIN 1 /* smallest stride (in words) */
#define STRIDE_MAX 128 /* largest stride */
#define SAMPLE 10 /* to get a larger time sample */
#define CACHE_MIN 1024 // smallest cache (in words)
#define CACHE_MAX 512*1024 // largest cache
#define STRIDE_MIN 1 // smallest stride (in words)
#define STRIDE_MAX 128 // largest stride
#define SAMPLE 10 // to get a larger time sample
#define SECS_PER_RUN 0.2
#define MAX_TIME_TESTS_SECONDS 60
#define TIME_TESTS_RUNNING 0
#define TIME_TESTS_COMPLETE 1
#define TIME_TESTS_NOT_RUNNING 2
#define TIME_TESTS_ERROR 3
#define MAX_CPU_BENCHMARKS_SECONDS 60
#define CPU_BENCHMARKS_RUNNING 0
#define CPU_BENCHMARKS_COMPLETE 1
#define CPU_BENCHMARKS_NOT_RUNNING 2
#define CPU_BENCHMARKS_ERROR 3
int check_cache_size( int mem_size );
double double_flop_test( int iterations, int print_debug );

View File

@ -31,7 +31,7 @@ alter table workunit
alter table result
add unique(name),
add index res_wuid (workunitid),
add index ind_res_st (server_state),
add index ind_res_st (server_state, random),
add index res_filedel (file_delete_state);
alter table host

View File

@ -298,6 +298,7 @@ struct RESULT {
double claimed_credit; // CPU time times host credit/sec
double granted_credit; // == canonical credit of WU
int opaque; // project-specific; usually external ID
int random; // determines send order
// the following not used in the DB
char wu_name[256];

View File

@ -298,14 +298,14 @@ void BOINC_MYSQL_DB::struct_to_str(void* vp, char* q, int type) {
"name='%s', cpu_time=%f, "
"xml_doc_in='%s', xml_doc_out='%s', stderr_out='%s', "
"batch=%d, file_delete_state=%d, validate_state=%d, "
"claimed_credit=%f, granted_credit=%f, opaque=%d",
"claimed_credit=%f, granted_credit=%f, opaque=%d, random=%d",
rp->id, rp->create_time, rp->workunitid,
rp->server_state, rp->outcome, rp->client_state,
rp->hostid, rp->report_deadline, rp->sent_time, rp->received_time,
rp->name, rp->cpu_time,
rp->xml_doc_in, rp->xml_doc_out, rp->stderr_out,
rp->batch, rp->file_delete_state, rp->validate_state,
rp->claimed_credit, rp->granted_credit, rp->opaque
rp->claimed_credit, rp->granted_credit, rp->opaque, rp->random
);
unescape_single_quotes(rp->xml_doc_out);
unescape_single_quotes(rp->stderr_out);
@ -505,6 +505,7 @@ void BOINC_MYSQL_DB::row_to_struct(MYSQL_ROW& r, void* vp, int type) {
rp->claimed_credit = atof(r[i++]);
rp->granted_credit = atof(r[i++]);
rp->opaque = atoi(r[i++]);
rp->random = atoi(r[i++]);
break;
case TYPE_WORKSEQ:
wsp = (WORKSEQ*)vp;
@ -860,7 +861,7 @@ int db_result_enum_server_state(RESULT& p, int limit) {
static ENUM e;
char buf[256];
if (!e.active) sprintf(buf, "where server_state=%d", p.server_state);
if (!e.active) sprintf(buf, "where server_state=%d order by random", p.server_state);
return boinc_db.db_enum(e, &p, TYPE_RESULT, buf, limit);
}

View File

@ -174,6 +174,7 @@ create table result (
claimed_credit double not null,
granted_credit double not null,
opaque integer not null,
random integer not null,
primary key (id)
);

View File

@ -14,10 +14,10 @@
// <run_if_user_active/>
// <work_buf_min_days>1.3</work_buf_min_days>
// ...
// <home>
// <venue name="home">
// <run_if_user_active/>
// ...
// </home>
// </venue>
// </global_preferences>
//
// and

View File

@ -36,8 +36,8 @@ function show_user_profile_private($user) {
row2("Postal code", $user->postal_code);
row2("", "<a href=edit_user_info_form.php>Edit account info</a>");
row1("Your preferences");
row2("General", "<a href=prefs.php?subset=global>View</a>");
row2(PROJECT, "<a href=prefs.php?subset=project>View</a>");
row2("General", "<a href=prefs.php?subset=global>View/edit</a>");
row2(PROJECT, "<a href=prefs.php?subset=project>View/edit</a>");
}
// show summary of dynamic and static info (public)

View File

@ -220,16 +220,17 @@ int main(int argc, char** argv) {
exit(1);
}
if (lock_file(LOCKFILE)) {
fprintf(stderr, "Another copy of make_work is already running\n");
exit(1);
}
if (asynch) {
if (fork()) {
exit(0);
}
}
if (lock_file(LOCKFILE)) {
fprintf(stderr, "Another copy of make_work is already running\n");
exit(1);
}
srand48(getpid() + time(0));
make_work();
}

View File

@ -52,7 +52,7 @@
$project1->start_servers();
$project2->start_servers();
$host->run("-exit_when_idle -no_time_test");
$host->run("-exit_when_idle -skip_cpu_benchmarks");
$project1->stop();
$project2->stop();

View File

@ -56,7 +56,7 @@
$project->start_servers();
// Run the client until there's no more work
$host->run("-exit_when_idle -no_time_test");
$host->run("-exit_when_idle -skip_cpu_benchmarks");
sleep(5);

View File

@ -43,7 +43,7 @@
$work = new Work($app);
$work->wu_template = "uc_wu";
$work->result_template = "uc_result";
$work->redundancy = 10;
$work->redundancy = 2;
$work->delay_bound = 2;
// Say that 1 WU takes 1 day on a ref comp
$work->rsc_fpops = 86400*1e9/2;
@ -52,7 +52,7 @@
$work->install($project);
$project->start_servers();
$host->run("-exit_when_idle -no_time_test");
$host->run("-exit_when_idle -skip_cpu_benchmarks");
$project->stop();
$project->validate($app, 2);

9
todo
View File

@ -56,6 +56,15 @@ Limit frequency of disk writes
MEDIUM-PRIORITY (should do before public release)
-----------------------
abort result if any file exceeds max_nbytes
per-WU limits
max disk
max CPU
max VM size
Don't fork CPU benchmark process if skip_cpu_benchmarks is set
let user choose language files in installation process
write general language file manipulation functions

View File

@ -192,6 +192,7 @@ int create_result(
upload_url, download_url
);
strcpy(r.xml_doc_in, result_template_copy);
r.random = lrand48();
retval = db_result_new(r);
if (retval) {