mirror of https://github.com/BOINC/boinc.git
- scheduler: added
    - config option <matchmaker> for matchmaker scheduling
    - config options <mm_min_slots>, <mm_max_slots>, <job_size_matching>
      to control matchmaker scheduling
- scheduler: tweaks to matchmaker scheduling from Kevin Reed
- web: fixes to alternative stylesheet from Simek

svn path=/trunk/boinc/; revision=15281
commit 49eb1246cf (parent d92eedb96a)
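The new options are read from the project's config.xml by SCHED_CONFIG::parse() (see the sched_config.C hunk below). A minimal sketch of the relevant fragment, with made-up values and illustrative element forms (not taken from this commit):

    <boinc>
        <config>
            <!-- enable matchmaker scheduling; values here are examples only -->
            <matchmaker>1</matchmaker>
            <mm_min_slots>20</mm_min_slots>
            <mm_max_slots>50</mm_max_slots>
            <job_size_matching>1</job_size_matching>
        </config>
    </boinc>

If <mm_min_slots> or <mm_max_slots> is omitted or zero, send_work_matchmaker() falls back to half of, respectively all of, the shared-memory result slots (ssp->max_wu_results), per the sched_send.C change below; 20 and 50 were the previously hard-coded values.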
checkin_notes

@@ -4252,3 +4252,17 @@ David May 22 2008
     client_state.C,h
     cpu_sched.C
     cs_prefs.C
+
+David May 23 2008
+    - scheduler: added
+        - config option <matchmaker> for matchmaker scheduling
+        - config options <mm_min_slots>, <mm_max_slots>, <job_size_matching>
+          to control matchmaker scheduling
+    - scheduler: tweaks to matchmaker scheduling from Kevin Reed
+    - web: fixes to alternative stylesheet from Simek
+
+    html/user/
+        style2.css
+    sched/
+        sched_config.C,h
+        sched_send.C
html/user/style2.css

@@ -22,6 +22,8 @@ input, select {
     border: 1px solid grey;
     background-color: #eeeeee;
     padding: 3px;
+    font-size: 11px;
+    margin: 2px;
 }
 
 input:hover, input:active, select:hover {
@@ -30,7 +32,7 @@ input:hover, input:active, select:hover {
 
 input.btn {
     margin: 3px 0px;
-    padding: 2px 4px;
+    padding: 2px 5px;
 }
 
 h1 {
@@ -200,33 +202,30 @@ tr.message {
     overflow: auto;
 }
 
-blockquote {
+blockquote.postbody {
     border-left: 2px solid #00A0E3;
-    background: #d6eef8;
-    padding: 3px;
+    background-color: #F0F8FF;
+    padding: 4px 8px;
     margin-bottom: 0px;
     margin-left: 20px;
+    margin-right: 60px;
     font-style: italic;
 }
 
-#blockquote.postbody {
-    border-style: inset;
-    border-color: blue;
-    background-color: rgb(240,240,255);
-    padding-left: 5px;
-    padding-bottom: 0px;
-    margin-bottom: 0px;
-    margin-left: 15px;
-    margin-right: 140px;
-    font-style: oblique
-}
-
 #thread {
     width: 100%;
     table-layout: fixed;
     overflow: overflow;
 }
 
+#threadauthor input {
+    background-color: white;
+}
+
+td.lastpost {
+    background-color: white;
+}
+
 .sigdelim {
     width: 60%;
     height: 1px;
@@ -252,9 +251,9 @@ td.postbody {
 }
 
 td.postfooter{
-    background-color: #F0F8FF;
+    background-color: #EEEEEE;
     border-top: 1px solid #00A0E3;
-    border-left: 1px solid #00A0E3;
+    border-left: none;
     border-right: none;
     border-bottom: none;
     padding-left: 6px;
sched/sched_config.C

@@ -50,6 +50,9 @@ int SCHED_CONFIG::parse(FILE* f) {
     int retval;
     regex_t re;
 
+    // Don't bother to initialize to zero since it's a global.
+    // If this ever changes, need to initialize
+    //
     mf.init_file(f);
     max_wus_to_send = 10;
     default_disk_max_used_gb = 100.;
@@ -59,7 +62,6 @@ int SCHED_CONFIG::parse(FILE* f) {
     fuh_debug_level = MSG_NORMAL;
     strcpy(httpd_user, "apache");
     max_ncpus = MAX_NCPUS;
-    debug_version_select = false;
 
     if (!xp.parse_start("boinc")) return ERR_XML_PARSE;
     if (!xp.parse_start("config")) return ERR_XML_PARSE;
@@ -183,8 +185,12 @@ int SCHED_CONFIG::parse(FILE* f) {
             }
             continue;
         }
+        if (xp.parse_bool(tag, "matchmaker", matchmaker)) continue;
+        if (xp.parse_int(tag, "mm_min_slots", mm_min_slots)) continue;
+        if (xp.parse_int(tag, "mm_max_slots", mm_max_slots)) continue;
         if (xp.parse_bool(tag, "job_size_matching", job_size_matching)) continue;
 
+
         if (xp.parse_bool(tag, "debug_version_select", debug_version_select)) continue;
         if (xp.parse_bool(tag, "debug_assignment", debug_assignment)) continue;
         if (xp.parse_bool(tag, "debug_prefs", debug_prefs)) continue;
sched/sched_config.h

@@ -120,7 +120,11 @@ public:
     int max_ncpus;
     vector<regex_t> ban_os;
     vector<regex_t> ban_cpu;
+    bool matchmaker;
+    int mm_min_slots;
+    int mm_max_slots;
     bool job_size_matching;
+
     // log flags
     //
     bool debug_version_select;
sched/sched_send.C

@@ -59,11 +59,8 @@ using namespace std;
 #define FCGI_ToFILE(x) (x)
 #endif
 
-//#define MATCHMAKER
 
-#ifdef MATCHMAKER
 void send_work_matchmaker(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply);
-#endif
 
 int preferred_app_message_index=0;
 
@@ -1206,6 +1203,59 @@ static void get_running_frac(SCHEDULER_REPLY& reply) {
     if (reply.wreq.running_frac > 1) reply.wreq.running_frac = 1;
 }
 
+static void send_work_old(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
+    reply.wreq.beta_only = false;
+    reply.wreq.user_apps_only = true;
+
+    // give top priority to results that require a 'reliable host'
+    //
+    if (reply.wreq.host_info.reliable) {
+        reply.wreq.reliable_only = true;
+        reply.wreq.infeasible_only = false;
+        scan_work_array(sreq, reply);
+    }
+    reply.wreq.reliable_only = false;
+
+    // give 2nd priority to results for a beta app
+    // (projects should load beta work with care,
+    // otherwise your users won't get production work done!
+    //
+    if (reply.wreq.host_info.allow_beta_work) {
+        reply.wreq.beta_only = true;
+        if (config.debug_send) {
+            log_messages.printf(MSG_DEBUG,
+                "[HOST#%d] will accept beta work. Scanning for beta work.\n",
+                reply.host.id
+            );
+        }
+        scan_work_array(sreq, reply);
+    }
+    reply.wreq.beta_only = false;
+
+    // give next priority to results that were infeasible for some other host
+    //
+    reply.wreq.infeasible_only = true;
+    scan_work_array(sreq, reply);
+
+    reply.wreq.infeasible_only = false;
+    scan_work_array(sreq, reply);
+
+    // If user has selected apps but will accept any,
+    // and we haven't found any jobs for selected apps, try others
+    //
+    if (!reply.wreq.nresults && reply.wreq.host_info.allow_non_preferred_apps ) {
+        reply.wreq.user_apps_only = false;
+        preferred_app_message_index = reply.wreq.no_work_messages.size();
+        if (config.debug_send) {
+            log_messages.printf(MSG_DEBUG,
+                "[HOST#%d] is looking for work from a non-preferred application\n",
+                reply.host.id
+            );
+        }
+        scan_work_array(sreq, reply);
+    }
+}
+
 void send_work(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
     if (sreq.work_req_seconds <= 0) return;
 
@@ -1226,12 +1276,8 @@ void send_work(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
 
     if (config.debug_send) {
         log_messages.printf(MSG_DEBUG,
-            "%s matchmaking scheduling; %s EDF sim\n",
-#ifdef MATCHMAKER
-            "Using",
-#else
-            "Not using",
-#endif
+            "%s matchmaker scheduling; %s EDF sim\n",
+            config.matchmaker?"Using":"Not using",
             config.workload_sim?"Using":"Not using"
         );
         log_messages.printf(MSG_DEBUG,
@@ -1276,67 +1322,16 @@ void send_work(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
     if (config.locality_scheduling) {
         reply.wreq.infeasible_only = false;
         send_work_locality(sreq, reply);
-    } else {
-#ifdef MATCHMAKER
+    } else if (config.matchmaker) {
         send_work_matchmaker(sreq, reply);
-#else
-        reply.wreq.beta_only = false;
-        reply.wreq.user_apps_only = true;
-
-        // give top priority to results that require a 'reliable host'
-        //
-        if (reply.wreq.host_info.reliable) {
-            reply.wreq.reliable_only = true;
-            reply.wreq.infeasible_only = false;
-            scan_work_array(sreq, reply);
-        }
-        reply.wreq.reliable_only = false;
-
-        // give 2nd priority to results for a beta app
-        // (projects should load beta work with care,
-        // otherwise your users won't get production work done!
-        //
-        if (reply.wreq.host_info.allow_beta_work) {
-            reply.wreq.beta_only = true;
-            if (config.debug_send) {
-                log_messages.printf(MSG_DEBUG,
-                    "[HOST#%d] will accept beta work. Scanning for beta work.\n",
-                    reply.host.id
-                );
-            }
-            scan_work_array(sreq, reply);
-        }
-        reply.wreq.beta_only = false;
-
-        // give next priority to results that were infeasible for some other host
-        //
-        reply.wreq.infeasible_only = true;
-        scan_work_array(sreq, reply);
-
-        reply.wreq.infeasible_only = false;
-        scan_work_array(sreq, reply);
-
-        // If user has selected apps but will accept any,
-        // and we haven't found any jobs for selected apps, try others
-        //
-        if (!reply.wreq.nresults && reply.wreq.host_info.allow_non_preferred_apps ) {
-            reply.wreq.user_apps_only = false;
-            preferred_app_message_index = reply.wreq.no_work_messages.size();
-            if (config.debug_send) {
-                log_messages.printf(MSG_DEBUG,
-                    "[HOST#%d] is looking for work from a non-preferred application\n",
-                    reply.host.id
-                );
-            }
-            scan_work_array(sreq, reply);
-        }
-#endif
+    } else {
+        send_work_old(sreq, reply);
     }
 
     explain_to_user(reply);
 }
 
-#ifdef MATCHMAKER
+// Matchmaker scheduling code follows
 
 struct JOB{
     int index;
@@ -1420,28 +1415,25 @@ bool JOB::get_score(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
 
     score = 1;
 
-    // check if user has selected apps
+    // check if user has selected apps,
+    // and send beta work to beta users
     //
-    if (!reply.wreq.host_info.allow_beta_work || config.distinct_beta_apps) {
-        if (app_not_selected(wu, sreq, reply)) {
-            if (!reply.wreq.host_info.allow_non_preferred_apps) {
-                return false;
-            }
-        } else {
-            if (reply.wreq.host_info.allow_non_preferred_apps) {
-                score += 1;
-            }
-        }
-    }
-
-    // if it's a beta user, try to send beta jobs
-    //
-    if (app->beta) {
+    if(app->beta && !config.distinct_beta_apps) {
         if (reply.wreq.host_info.allow_beta_work) {
             score += 1;
         } else {
             return false;
         }
+    } else {
+        if (app_not_selected(wu, sreq, reply)) {
+            if (!reply.wreq.host_info.allow_non_preferred_apps) {
+                return false;
+            } else {
+                // Allow work to be sent, but it will not get a bump in its score
+            }
+        } else {
+            score += 1;
+        }
     }
 
     // if job needs to get done fast, send to fast/reliable host
@@ -1636,8 +1628,10 @@ void JOB_SET::send(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
 void send_work_matchmaker(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
     int i, slots_scanned=0, slots_locked=0;
     JOB_SET jobs;
-    int min_slots = 20;
-    int max_slots = 50;
+    int min_slots = config.mm_min_slots;
+    if (!min_slots) min_slots = ssp->max_wu_results/2;
+    int max_slots = config.mm_max_slots;
+    if (!max_slots) max_slots = ssp->max_wu_results;
     int max_locked = 10;
     int pid = getpid();
 
@@ -1668,7 +1662,7 @@ void send_work_matchmaker(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
                 "score for %s: %f\n", wu_result.workunit.name, job.score
             );
         }
-        if (job.score > jobs.lowest_score()) {
+        if (job.score > jobs.lowest_score() || !jobs.request_satisfied()) {
             ssp->wu_results[i].state = pid;
             unlock_sema();
             if (wu_is_infeasible_slow(wu_result, sreq, reply)) {
@@ -1692,6 +1686,5 @@ void send_work_matchmaker(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) {
         );
     }
 }
-#endif
 
 const char *BOINC_RCSID_32dcd335e7 = "$Id$";