cpu scheduling code cleanup

svn path=/trunk/boinc/; revision=9416
David Anderson 2006-02-07 23:08:23 +00:00
parent 6a6e554ab1
commit 00cafefdad
4 changed files with 71 additions and 84 deletions

checkin_notes

@@ -1597,3 +1597,15 @@ David 7 Feb 2006
client_types.h
cs_apps.C
cs_scheduler.C
David 7 Feb 2006
- Unravel the CPU scheduler code a little:
- split set_scheduler_modes() into two functions,
set_scheduler_mode() and set_work_fetch_mode().
- make a separate function, enforce_schedule(),
that does the actual starting and stopping of processes
client/
client_state.h
cs_apps.C
cs_scheduler.C

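For orientation before the diffs: below is a minimal, self-contained sketch of the decide-then-enforce split this commit introduces. It is illustrative only, not BOINC source; the Task struct and the sample data are invented, and only the enforce_schedule() idea and the scheduler_state / next_scheduler_state pairing come from the change itself.

    // Illustrative sketch only -- not BOINC source. The Task struct and the
    // sample data are invented; only the "decide, then enforce" split and the
    // scheduler_state/next_scheduler_state fields mirror this commit.
    #include <cstdio>
    #include <vector>

    enum SchedState { CPU_SCHED_PREEMPTED, CPU_SCHED_SCHEDULED };

    struct Task {
        const char* name;
        SchedState scheduler_state;       // what the task is currently doing
        SchedState next_scheduler_state;  // what the decision pass wants it to do
    };

    // Enforcement pass: act only where current and desired state differ.
    // This is the role the new enforce_schedule() plays in cs_apps.C below;
    // the decision pass (schedule_cpus()) only fills in next_scheduler_state.
    void enforce_schedule(std::vector<Task>& tasks) {
        for (Task& t : tasks) {
            if (t.scheduler_state == CPU_SCHED_SCHEDULED
                && t.next_scheduler_state == CPU_SCHED_PREEMPTED
            ) {
                std::printf("preempting %s\n", t.name);   // the real client suspends or quits the app
            } else if (t.scheduler_state == CPU_SCHED_PREEMPTED
                && t.next_scheduler_state == CPU_SCHED_SCHEDULED
            ) {
                std::printf("starting %s\n", t.name);     // the real client starts or resumes the app
            }
            t.scheduler_state = t.next_scheduler_state;
        }
    }

    int main() {
        // Pretend the decision pass chose to swap which task owns the one CPU.
        std::vector<Task> tasks = {
            {"A", CPU_SCHED_SCHEDULED, CPU_SCHED_PREEMPTED},
            {"B", CPU_SCHED_PREEMPTED, CPU_SCHED_SCHEDULED},
        };
        enforce_schedule(tasks);  // prints "preempting A" then "starting B"
    }
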
client/client_state.h

@@ -305,6 +305,7 @@ private:
bool schedule_largest_debt_project(double expected_pay_off);
bool schedule_earliest_deadline_result();
bool schedule_cpus();
void enforce_schedule();
bool start_apps();
bool handle_finished_apps();
void handle_file_xfer_apps();
@@ -375,7 +376,8 @@ private:
int proj_min_results(PROJECT*, double);
bool rr_misses_deadline(double, double);
bool edf_misses_deadline(double);
void set_scheduler_modes();
void set_scheduler_mode();
void set_work_fetch_mode();
void generate_new_host_cpid();
// --------------- cs_statefile.C:

client/cs_apps.C

@@ -544,12 +544,10 @@ bool CLIENT_STATE::schedule_cpus() {
double expected_pay_off;
ACTIVE_TASK *atp;
PROJECT *p;
int retval, j;
double vm_limit, elapsed_time;
int j;
double elapsed_time;
unsigned int i;
SCOPE_MSG_LOG scope_messages(log_messages, CLIENT_MSG_LOG::DEBUG_TASK);
if (projects.size() == 0) return false;
if (results.size() == 0) return false;
@@ -561,12 +559,10 @@ bool CLIENT_STATE::schedule_cpus() {
elapsed_time = gstate.now - cpu_sched_last_time;
if (must_schedule_cpus) {
must_schedule_cpus = false;
scope_messages.printf("CLIENT_STATE::schedule_cpus(): must schedule\n");
} else {
if (elapsed_time < (global_prefs.cpu_scheduling_period_minutes*60)) {
return false;
}
scope_messages.printf("CLIENT_STATE::schedule_cpus(): time %f\n", elapsed_time);
}
// mark file xfer results as completed;
@@ -590,11 +586,11 @@ bool CLIENT_STATE::schedule_cpus() {
}
}
set_scheduler_modes();
set_scheduler_mode();
adjust_debts();
// mark active tasks as preempted
// MUST DO THIS AFTER accumulate_work()
// MUST DO THIS AFTER adjust_debts()
//
for (i=0; i<active_tasks.active_tasks.size(); i++) {
atp = active_tasks.active_tasks[i];
@@ -624,13 +620,34 @@ bool CLIENT_STATE::schedule_cpus() {
}
}
// preempt, start, and resume tasks
enforce_schedule();
// reset work accounting
// do this at the end of schedule_cpus() because
// wall_cpu_time_this_period's can change as apps finish
//
vm_limit = (global_prefs.vm_max_used_pct/100.)*host_info.m_swap;
for (i=0; i<projects.size(); i++) {
p = projects[i];
p->wall_cpu_time_this_period = 0;
}
total_wall_cpu_time_this_period = 0;
total_cpu_time_this_period = 0;
cpu_sched_last_time = gstate.now;
set_client_state_dirty("schedule_cpus");
return true;
}
// preempt, start, and resume tasks
//
void CLIENT_STATE::enforce_schedule() {
double vm_limit = (global_prefs.vm_max_used_pct/100.)*host_info.m_swap;
unsigned int i;
ACTIVE_TASK *atp;
int retval;
for (i=0; i<active_tasks.active_tasks.size(); i++) {
atp = active_tasks.active_tasks[i];
scope_messages.printf("CLIENT_STATE::schedule_cpus(): project %s result %s state %d\n",
atp->result->project->project_name, atp->result->name, atp->scheduler_state);
if (atp->scheduler_state == CPU_SCHED_SCHEDULED
&& atp->next_scheduler_state == CPU_SCHED_PREEMPTED
) {
@@ -655,24 +672,8 @@ bool CLIENT_STATE::schedule_cpus() {
}
atp->cpu_time_at_last_sched = atp->current_cpu_time;
}
// reset work accounting
// doing this at the end of schedule_cpus() because
// wall_cpu_time_this_period's can change as apps finish
//
for (i=0; i<projects.size(); i++) {
p = projects[i];
p->wall_cpu_time_this_period = 0;
}
total_wall_cpu_time_this_period = 0;
total_cpu_time_this_period = 0;
cpu_sched_last_time = gstate.now;
set_client_state_dirty("schedule_cpus");
return true;
}
// This is called when the client is initialized.
// Try to restart any tasks that were running when we last shut down.
//

client/cs_scheduler.C

@@ -1055,7 +1055,7 @@ bool CLIENT_STATE::should_get_work() {
// let it process for a while to get out of the CPU overload state.
//
if (!work_fetch_no_new_work) {
set_scheduler_modes();
set_work_fetch_mode();
}
return !work_fetch_no_new_work;
@@ -1226,29 +1226,17 @@ bool CLIENT_STATE::rr_misses_deadline(double per_cpu_proc_rate, double rrs) {
return false;
}
// Decide on modes for work-fetch and CPU sched policies.
// Namely, set the variables
// - work_fetch_no_new_work
// - cpu_earliest_deadline_first
// and print a message if we're changing their value
// Decide on work-fetch policy
// Namely, set the variable work_fetch_no_new_work
// and print a message if we're changing its value
//
void CLIENT_STATE::set_scheduler_modes() {
#if 0
RESULT* rp;
unsigned int i;
#endif
void CLIENT_STATE::set_work_fetch_mode() {
bool should_not_fetch_work = false;
bool use_earliest_deadline_first = false;
double total_proc_rate = avg_proc_rate();
double per_cpu_proc_rate = total_proc_rate/ncpus;
SCOPE_MSG_LOG scope_messages(log_messages, CLIENT_MSG_LOG::DEBUG_SCHED_CPU);
double rrs = runnable_resource_share();
if (rr_misses_deadline(per_cpu_proc_rate, rrs)) {
// if round robin would miss a deadline, use EDF
//
use_earliest_deadline_first = true;
if (!no_work_for_a_cpu()) {
should_not_fetch_work = true;
}
@@ -1264,41 +1252,8 @@ void CLIENT_STATE::set_scheduler_modes() {
}
}
}
#if 0
for (i=0; i<results.size(); i++) {
rp = results[i];
if (rp->computing_done()) continue;
if (rp->project->non_cpu_intensive) continue;
// Is the nearest deadline within a day?
//
if (rp->report_deadline - gstate.now < 60 * 60 * 24) {
result_has_deadline_problem = true;
rp->deadline_problem = true;
scope_messages.printf(
"set_scheduler_modes(): Less than 1 day until deadline.\n"
);
}
// is there a deadline < twice the users connect period?
//
if (rp->report_deadline - gstate.now < global_prefs.work_buf_min_days * SECONDS_PER_DAY * 2) {
result_has_deadline_problem = true;
rp->deadline_problem = true;
scope_messages.printf(
"set_scheduler_modes(): Deadline is before reconnect time.\n"
);
}
}
#endif
// display only when the policy changes to avoid once per second
//
if (work_fetch_no_new_work && !should_not_fetch_work) {
msg_printf(NULL, MSG_INFO,
"Allowing work fetch again."
);
msg_printf(NULL, MSG_INFO, "Allowing work fetch again.");
}
if (!work_fetch_no_new_work && should_not_fetch_work) {
@@ -1306,6 +1261,25 @@ void CLIENT_STATE::set_scheduler_modes() {
"Suspending work fetch because computer is overcommitted."
);
}
work_fetch_no_new_work = should_not_fetch_work;
}
// Decide on CPU sched policy
// Namely, set the variable cpu_earliest_deadline_first
// and print a message if we're changing its value
//
void CLIENT_STATE::set_scheduler_mode() {
bool use_earliest_deadline_first = false;
double total_proc_rate = avg_proc_rate();
double per_cpu_proc_rate = total_proc_rate/ncpus;
double rrs = runnable_resource_share();
if (rr_misses_deadline(per_cpu_proc_rate, rrs)) {
// if round robin would miss a deadline, use EDF
//
use_earliest_deadline_first = true;
}
if (cpu_earliest_deadline_first && !use_earliest_deadline_first) {
msg_printf(NULL, MSG_INFO,
@@ -1314,11 +1288,9 @@ void CLIENT_STATE::set_scheduler_modes() {
}
if (!cpu_earliest_deadline_first && use_earliest_deadline_first) {
msg_printf(NULL, MSG_INFO,
"Using critical-deadline-first scheduling because computer is overcommitted."
"Using earliest-deadline-first scheduling because computer is overcommitted."
);
}
work_fetch_no_new_work = should_not_fetch_work;
cpu_earliest_deadline_first = use_earliest_deadline_first;
}