- scheduler: Gianni requested a feature where jobs have a
    "min # of GPU processors" attribute (stored in batch)
    and are sent only to hosts whose GPUs have at least this #.

    The logical place for this is in the scoring function, JOB::get_score().
    I added a clause (#ifdef'd out) that does this.
    It rejects the WU if #procs is too small,
    otherwise it adds min/actual to the score.
    This favors sending jobs that need lots of procs to GPUs that have them.

svn path=/trunk/boinc/; revision=18764
David Anderson 2009-07-29 17:29:56 +00:00
parent 42f897e4d1
commit 4c070e3bfb
3 changed files with 39 additions and 1 deletion
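
For context, the rule the commit message describes reduces to a few lines. The sketch below is hypothetical (apply_min_proc_rule, min_procs, and actual_procs are illustrative names, not from the commit); only the reject-if-too-small check and the min/actual score term come from the change itself.

// Minimal standalone sketch of the scoring rule described above.
// Assumes a job that needs min_procs GPU multiprocessors and a host
// GPU that has actual_procs of them.
#include <cstdio>

static bool apply_min_proc_rule(int min_procs, int actual_procs, double& score) {
    if (actual_procs < min_procs) return false;   // host GPU too small: don't send the job
    score += (double)min_procs / actual_procs;    // term in (0,1]; bigger when the job nearly fills the GPU
    return true;
}

int main() {
    double score = 1;

    // A job needing 16 multiprocessors offered to a 30-multiprocessor GPU:
    // accepted, and the score gains 16/30 ~= 0.53.
    bool ok = apply_min_proc_rule(16, 30, score);
    printf("accepted=%d score=%.2f\n", (int)ok, score);

    // The same job offered to an 8-multiprocessor GPU is rejected and the
    // score is left unchanged.
    ok = apply_min_proc_rule(16, 8, score);
    printf("accepted=%d score=%.2f\n", (int)ok, score);
    return 0;
}

On a given GPU, jobs with a larger minimum get a larger boost, which is the "favor sending jobs that need lots of procs to GPUs that have them" effect the message describes.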

checkin_notes

@@ -6638,3 +6638,18 @@ Rom 29 July 2009
     clientgui/
         AccountManagerPropertiesPage.cpp, .h
         ProjectPropertiesPage.cpp, .h
+
+David 29 July 2009
+    - scheduler: Gianni requested a feature where jobs have a
+        "min # of GPU processors" attribute (stored in batch)
+        and are sent only to hosts whose GPUs have at least this #.
+        The logical place for this is in the scoring function, JOB::get_score().
+        I added a clause (#ifdef'd out) that does this.
+        It rejects the WU if #procs is too small,
+        otherwise it adds min/actual to the score.
+        This favors sending jobs that need lots of procs to GPUs that have them.
+
+    sched/
+        sched_score.cpp
+        server_types.h

sched/sched_score.cpp

@@ -110,7 +110,7 @@ bool JOB::get_score() {
     score = 0;
-    // Find the app_version for the client's platform.
+    // Find the best app version to use.
     //
     bavp = get_app_version(wu, true);
     if (!bavp) return false;
@@ -128,6 +128,27 @@ bool JOB::get_score() {
     score = 1;
+#if 0
+    // example: for CUDA app, wu.batch is the minimum number of processors.
+    // Don't send if #procs is less than this.
+    // Otherwise add min/actual to score
+    // (this favors sending jobs that need lots of procs to GPUs that have them)
+    //
+    if (!strcmp(app->name, "foobar") && bavp->host_usage.ncudas) {
+        if (!g_request->coproc_cuda) {
+            log_messages.printf(MSG_CRITICAL,
+                "[HOST#%d] expected CUDA device\n", g_reply->host.id
+            );
+            return false;
+        }
+        int n = g_request->coproc_cuda->prop.multiProcessorCount;
+        if (n < wu.batch) {
+            return false;
+        }
+        score += ((double)wu.batch)/n;
+    }
+#endif
     // check if user has selected apps,
     // and send beta work to beta users
     //
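
To actually enable this clause, a project would change the #if 0 to #if 1, replace "foobar" with the name of its CUDA application, and arrange for the workunit's batch field to hold the required multiprocessor count; how batch gets set (e.g. in the project's work generator) is up to the project and is not part of this commit.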

sched/server_types.h

@@ -257,6 +257,8 @@ struct BEST_APP_VERSION {
     // populated otherwise:
     APP_VERSION* avp;
+
+    // populated in either case:
     HOST_USAGE host_usage;
     BEST_APP_VERSION() {
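
The new comment records that host_usage is filled in whichever way the app version is found, which is presumably why the scoring clause above can read bavp->host_usage.ncudas without further checks.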