*** empty log message ***

svn path=/trunk/boinc/; revision=12210
This commit is contained in:
David Anderson 2007-03-11 03:15:17 +00:00
parent 071467d702
commit 5cb43bbcf5
6 changed files with 50 additions and 3 deletions

View File

@ -2315,3 +2315,12 @@ David 10 Mar 2007
sched/
server_types.C
David 10 Mar 2007
- scheduler: add one_result_per_host_per_wu option.
This is useful if you use homogeneous redundancy
and most hosts of a particular class belong to a single user.
sched/
sched_array.C
sched_config.C,h

View File

@ -100,6 +100,8 @@ Use the Apache 2.0 mod_deflate module to automatically
compress files on the fly.
This method will work with all BOINC clients,
but it will do compression only for 5.4+ clients.
Here is a <a href="apache_deflate.txt">cookbook</a>
on how to configure this.
<li>
Compress files and give them a filename suffix such as '.gz'.

View File

@ -208,6 +208,7 @@ echo html_text("
[ <default_disk_max_used_gb> X </default_disk_max_used_gb> ]
[ <default_disk_max_used_pct> X </default_disk_max_used_pct> ]
[ <default_disk_min_free_gb> X </default_disk_min_free_gb> ]
[ <one_result_per_host_per_wu/> ]
");
list_start();
list_option("one_result_per_user_per_wu",
@ -374,6 +375,12 @@ list_item("reliable_time<br> reliable_min_avg_credit<br>
(typically 0.5 or so).
"
);
list_option("one_result_per_host_per_wu",
"If present, send at most one result of a given workunit to a given host.
This is weaker than one_result_per_user_per_wu;
it is useful if you're using homogeneous redundancy and
most of the hosts of a particular class belong to a single user."
);
list_end();
echo "

View File

@ -110,8 +110,8 @@ void scan_work_array(
}
}
// If we are looking for infeasible results and the result is not infeasible
// then move on
// don't send if we are looking for infeasible results
// and the result is not infeasible
//
if (reply.wreq.infeasible_only && (wu_result.infeasible_count==0)) {
continue;
@ -119,7 +119,7 @@ void scan_work_array(
// don't send if we're already sending a result for same WU
//
if (config.one_result_per_user_per_wu) {
if (config.one_result_per_user_per_wu || config.one_result_per_host_per_wu) {
if (wu_already_in_reply(wu_result.workunit, reply)) {
continue;
}
@ -194,6 +194,33 @@ void scan_work_array(
goto dont_send;
}
}
} else if (config.one_result_per_host_per_wu) {
// Don't send if we've already sent a result
// of this WU to this host.
// We only have to check this
// if we don't send one result per user.
//
sprintf(buf,
"where workunitid=%d and hostid=%d",
wu_result.workunit.id, reply.host.id
);
retval = result.count(n, buf);
if (retval) {
log_messages.printf(
SCHED_MSG_LOG::MSG_CRITICAL,
"send_work: can't get result count (%d)\n", retval
);
goto dont_send;
} else {
if (n>0) {
log_messages.printf(
SCHED_MSG_LOG::MSG_DEBUG,
"send_work: host %d already has %d result(s) for WU %d\n",
reply.host.id, n, wu_result.workunit.id
);
goto dont_send;
}
}
}
// if desired, make sure redundancy is homogeneous

View File

@ -79,6 +79,7 @@ int SCHED_CONFIG::parse(FILE* f) {
else if (xp.parse_str(tag, "upload_dir", upload_dir, sizeof(upload_dir))) continue;
else if (xp.parse_str(tag, "sched_lockfile_dir", sched_lockfile_dir, sizeof(sched_lockfile_dir))) continue;
else if (xp.parse_bool(tag, "one_result_per_user_per_wu", one_result_per_user_per_wu)) continue;
else if (xp.parse_bool(tag, "one_result_per_host_per_wu", one_result_per_host_per_wu)) continue;
else if (xp.parse_bool(tag, "non_cpu_intensive", non_cpu_intensive)) continue;
else if (xp.parse_bool(tag, "verify_files_on_app_start", verify_files_on_app_start)) continue;
else if (xp.parse_bool(tag, "homogeneous_redundancy", homogeneous_redundancy)) continue;

View File

@ -41,6 +41,7 @@ public:
char upload_dir[256];
char sched_lockfile_dir[256];
bool one_result_per_user_per_wu;
bool one_result_per_host_per_wu;
bool msg_to_host;
int min_sendwork_interval;
int max_wus_to_send;