// Berkeley Open Infrastructure for Network Computing
// http://boinc.berkeley.edu
// Copyright (C) 2005 University of California
//
// This is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation;
// either version 2.1 of the License, or (at your option) any later version.
//
// This software is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// To view the GNU Lesser General Public License visit
// http://www.gnu.org/copyleft/lesser.html
// or write to the Free Software Foundation, Inc.,
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

// CPU scheduling logic.
//
// Terminology:
//
// Episode
//   The execution of a task is divided into "episodes".
//   An episode starts when the application is executed,
//   and ends when it exits or dies
//   (e.g., because it's preempted and not left in memory,
//   or the user quits BOINC, or the host is turned off).
//   A task may checkpoint now and then.
//   Each episode begins with the state of the last checkpoint.
//
// Debt interval
//   The interval between consecutive executions of adjust_debts().
//
// Run interval
//   If an app is running (not suspended), the interval
//   during which it's been running.

#ifdef _WIN32
#include "boinc_win.h"
#endif

#include "client_msgs.h"
#include "client_state.h"
#include "util.h"
#include "log_flags.h"

using std::vector;

#define MAX_DEBT (86400)
    // maximum project debt, in seconds of wall CPU (86400 = one day)

#define CPU_PESSIMISM_FACTOR 0.9
    // assume actual CPU utilization will be this multiple
    // of what we've actually measured recently
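    // (MAX_DEBT is used to clamp short-term debt in adjust_debts();
    // CPU_PESSIMISM_FACTOR tightens deadlines in rr_simulation(),
    // so deadline misses are predicted somewhat conservatively.)
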
static bool more_preemptable(ACTIVE_TASK* t0, ACTIVE_TASK* t1) {
    // Returning true means t1 is more preemptable than t0;
    // the "largest" task under this ordering is at the front of the heap,
    // and we want the best candidate for preemption at the front.
    //
    if (t0->result->project->deadlines_missed && !t1->result->project->deadlines_missed) return false;
    if (!t0->result->project->deadlines_missed && t1->result->project->deadlines_missed) return true;
    if (t0->result->project->deadlines_missed && t1->result->project->deadlines_missed) {
        if (t0->result->report_deadline > t1->result->report_deadline) return true;
        return false;
    } else {
        double t0_episode_time = gstate.now - t0->run_interval_start_wall_time;
        double t1_episode_time = gstate.now - t1->run_interval_start_wall_time;
        if (t0_episode_time < t1_episode_time) return false;
        if (t0_episode_time > t1_episode_time) return true;
        if (t0->result->report_deadline > t1->result->report_deadline) return true;
        return false;
    }
}

// Choose a "best" runnable result for each project
//
// Values are returned in project->next_runnable_result
// (skip projects for which this is already non-NULL)
//
// Don't choose results with already_selected == true;
// mark chosen results as already_selected.
//
// The preference order:
// 1. results with active tasks that are running
// 2. results with active tasks that are preempted (but have a process)
// 3. results with active tasks that have no process
// 4. results with no active task
//
void CLIENT_STATE::assign_results_to_projects() {
    unsigned int i;
    RESULT* rp;
    PROJECT* project;

    // scan results with an ACTIVE_TASK
    //
    for (i=0; i<active_tasks.active_tasks.size(); i++) {
        ACTIVE_TASK *atp = active_tasks.active_tasks[i];
        rp = atp->result;
        if (rp->already_selected) continue;
        if (!rp->runnable()) continue;
        project = rp->project;
        if (!project->next_runnable_result) {
            project->next_runnable_result = rp;
            continue;
        }

        // see if this task is "better" than the one currently
        // selected for this project
        //
        ACTIVE_TASK *next_atp = lookup_active_task_by_result(
            project->next_runnable_result
        );

        if ((next_atp->task_state == PROCESS_UNINITIALIZED && atp->process_exists())
            || (next_atp->scheduler_state == CPU_SCHED_PREEMPTED
            && atp->scheduler_state == CPU_SCHED_SCHEDULED)
        ) {
            project->next_runnable_result = atp->result;
        }
    }

    // Now consider results that don't have an active task
    //
    for (i=0; i<results.size(); i++) {
        rp = results[i];
        if (rp->already_selected) continue;
        if (lookup_active_task_by_result(rp)) continue;
        if (!rp->runnable()) continue;

        project = rp->project;
        if (project->next_runnable_result) continue;

        // don't start results if project has > 2 uploads in progress.
        // This avoids creating an unbounded number of completed
        // results for a project that can download and compute
        // faster than it can upload.
        //
        if (project->nactive_uploads > 2) continue;

        project->next_runnable_result = rp;
    }

    // mark selected results, so CPU scheduler won't try to consider
    // a result more than once
    //
    for (i=0; i<projects.size(); i++) {
        project = projects[i];
        if (project->next_runnable_result) {
            project->next_runnable_result->already_selected = true;
        }
    }
}

// Among projects with a "next runnable result",
// find the project P with the greatest anticipated debt,
// and return its next runnable result
//
RESULT* CLIENT_STATE::largest_debt_project_best_result() {
    PROJECT *best_project = NULL;
    double best_debt = -MAX_DEBT;
    bool first = true;
    unsigned int i;

    for (i=0; i<projects.size(); i++) {
        PROJECT* p = projects[i];
        if (!p->next_runnable_result) continue;
        if (p->non_cpu_intensive) continue;
        if (first || p->anticipated_debt > best_debt) {
            first = false;
            best_project = p;
            best_debt = p->anticipated_debt;
        }
    }
    if (!best_project) return NULL;

    if (log_flags.cpu_sched_debug) {
        msg_printf(best_project, MSG_INFO,
            "[cpu_sched_debug] highest debt: %f %s",
            best_project->anticipated_debt,
            best_project->next_runnable_result->name
        );
    }
    RESULT* rp = best_project->next_runnable_result;
    best_project->next_runnable_result = 0;
    return rp;
}

// Return earliest-deadline result from a project with deadlines_missed>0
//
RESULT* CLIENT_STATE::earliest_deadline_result() {
    RESULT *best_result = NULL;
    unsigned int i;

    for (i=0; i<results.size(); i++) {
        RESULT* rp = results[i];
        if (!rp->runnable()) continue;
        if (rp->project->non_cpu_intensive) continue;
        if (rp->already_selected) continue;
        if (!rp->project->deadlines_missed) continue;
        if (!best_result || rp->report_deadline < best_result->report_deadline) {
            best_result = rp;
        }
    }
    if (!best_result) return NULL;

    if (log_flags.cpu_sched_debug) {
        msg_printf(best_result->project, MSG_INFO,
            "[cpu_sched_debug] earliest deadline: %f %s",
            best_result->report_deadline, best_result->name
        );
    }

    return best_result;
}

// adjust project debts (short, long-term)
//
void CLIENT_STATE::adjust_debts() {
    unsigned int i;
    double total_long_term_debt = 0;
    double total_short_term_debt = 0;
    double prrs, rrs;
    int nprojects=0, nrprojects=0;
    PROJECT *p;
    double share_frac;
    double wall_cpu_time = gstate.now - debt_interval_start;

    if (wall_cpu_time < 1) return;

    // Total up total and per-project "wall CPU" since last CPU reschedule.
    // "Wall CPU" is the wall time during which a task was
    // runnable (at the OS level).
    //
    // We use wall CPU for debt calculation
    // (instead of reported actual CPU) for two reasons:
    // 1) the process might have paged a lot, so the actual CPU
    //    may be a lot less than wall CPU
    // 2) BOINC relies on apps to report their CPU time.
    //    Sometimes there are bugs and apps report zero CPU.
    //    It's safer not to trust them.
    //
    for (i=0; i<active_tasks.active_tasks.size(); i++) {
        ACTIVE_TASK* atp = active_tasks.active_tasks[i];
        if (atp->scheduler_state != CPU_SCHED_SCHEDULED) continue;
        if (atp->wup->project->non_cpu_intensive) continue;

        atp->result->project->wall_cpu_time_this_debt_interval += wall_cpu_time;
        total_wall_cpu_time_this_debt_interval += wall_cpu_time;
        total_cpu_time_this_debt_interval += atp->current_cpu_time - atp->debt_interval_start_cpu_time;
    }

    time_stats.update_cpu_efficiency(
        total_wall_cpu_time_this_debt_interval, total_cpu_time_this_debt_interval
    );

    rrs = runnable_resource_share();
    prrs = potentially_runnable_resource_share();

    for (i=0; i<projects.size(); i++) {
        p = projects[i];

        // potentially_runnable() can be false right after a result completes,
        // but we still need to update its LTD.
        // In this case its wall_cpu_time_this_debt_interval will be nonzero.
        //
        if (!(p->potentially_runnable()) && p->wall_cpu_time_this_debt_interval) {
            prrs += p->resource_share;
        }
    }

    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        if (p->non_cpu_intensive) continue;
        nprojects++;

        // adjust long-term debts
        //
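        // Over the debt interval, a project is "owed" its share
        // (resource_share/prrs) of the total wall CPU, and is "paid"
        // the wall CPU its own tasks actually received; the difference
        // accumulates as long-term debt.
        //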
        if (p->potentially_runnable() || p->wall_cpu_time_this_debt_interval) {
            share_frac = p->resource_share/prrs;
            p->long_term_debt += share_frac*total_wall_cpu_time_this_debt_interval
                - p->wall_cpu_time_this_debt_interval;
        }
        total_long_term_debt += p->long_term_debt;

        // adjust short term debts
        //
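        // Same rule, but relative to runnable projects only.
        // Illustrative numbers: runnable projects A (share 100) and B
        // (share 300), rrs = 400, one CPU, and A ran for the whole
        // 60-second debt interval:
        //   A: STD += (100/400)*60 - 60 = -45
        //   B: STD += (300/400)*60 -  0 = +45
        //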
        if (p->runnable()) {
            nrprojects++;
            share_frac = p->resource_share/rrs;
            p->short_term_debt += share_frac*total_wall_cpu_time_this_debt_interval
                - p->wall_cpu_time_this_debt_interval;
            total_short_term_debt += p->short_term_debt;
        } else {
            p->short_term_debt = 0;
            p->anticipated_debt = 0;
        }
    }

    if (nprojects==0) return;

    // long-term debt:
    //   normalize so mean is zero,
    // short-term debt:
    //   normalize so mean is zero, and limit abs value at MAX_DEBT
    //
    double avg_long_term_debt = total_long_term_debt / nprojects;
    double avg_short_term_debt = 0;
    if (nrprojects) {
        avg_short_term_debt = total_short_term_debt / nrprojects;
    }
    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        if (p->non_cpu_intensive) continue;
        if (p->runnable()) {
            p->short_term_debt -= avg_short_term_debt;
            if (p->short_term_debt > MAX_DEBT) {
                p->short_term_debt = MAX_DEBT;
            }
            if (p->short_term_debt < -MAX_DEBT) {
                p->short_term_debt = -MAX_DEBT;
            }
        }

        p->long_term_debt -= avg_long_term_debt;
        if (log_flags.debt_debug) {
            msg_printf(0, MSG_INFO,
                "[debt_debug] adjust_debts(): project %s: STD %f, LTD %f",
                p->project_name, p->short_term_debt, p->long_term_debt
            );
        }
    }

    // reset work accounting
    //
    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        p->wall_cpu_time_this_debt_interval = 0.0;
    }
    for (i = 0; i < active_tasks.active_tasks.size(); ++i) {
        ACTIVE_TASK* atp = active_tasks.active_tasks[i];
        atp->debt_interval_start_cpu_time = atp->current_cpu_time;
    }
    total_wall_cpu_time_this_debt_interval = 0.0;
    total_cpu_time_this_debt_interval = 0.0;
    debt_interval_start = gstate.now;
}

// Decide whether to run the CPU scheduler.
// This is called periodically.
// Tasks to be scheduled are placed, in order of urgency,
// in the ordered_scheduled_results vector.
//
bool CLIENT_STATE::possibly_schedule_cpus() {
    double elapsed_time;
    static double last_reschedule=0;

    if (projects.size() == 0) return false;
    if (results.size() == 0) return false;

    // Reschedule every CPU scheduling period,
    // or if must_schedule_cpus is set
    // (meaning a new result is available, or a CPU has been freed).
    //
    elapsed_time = gstate.now - last_reschedule;
    if (elapsed_time >= gstate.global_prefs.cpu_scheduling_period_minutes * 60) {
        request_schedule_cpus("Scheduling period elapsed.");
    }

    if (!must_schedule_cpus) return false;
    last_reschedule = now;
    must_schedule_cpus = false;
    schedule_cpus();
    return true;
}

void CLIENT_STATE::print_deadline_misses() {
    unsigned int i;
    RESULT* rp;
    PROJECT* p;
    for (i=0; i<results.size(); i++){
        rp = results[i];
        if (rp->rr_sim_misses_deadline && !rp->last_rr_sim_missed_deadline) {
            msg_printf(rp->project, MSG_INFO,
                "[cpu_sched_debug] Result %s projected to miss deadline.", rp->name
            );
        }
        else if (!rp->rr_sim_misses_deadline && rp->last_rr_sim_missed_deadline) {
            msg_printf(rp->project, MSG_INFO,
                "[cpu_sched_debug] Result %s projected to meet deadline.", rp->name
            );
        }
    }
    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        if (p->rr_sim_deadlines_missed) {
            msg_printf(p, MSG_INFO,
                "[cpu_sched_debug] Project has %d projected deadline misses",
                p->rr_sim_deadlines_missed
            );
        }
    }
}

// CPU scheduler - decide which results to run.
// output: sets ordered_scheduled_results.
//
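// Overview:
// - run the round-robin simulation to find projected deadline misses
// - adjust short- and long-term debts
// - fill up to ncpus slots: first with earliest-deadline results from
//   projects with projected deadline misses, then with the best result
//   of whichever project has the greatest anticipated debt
// - skip results whose working set doesn't fit in the RAM still available
//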
void CLIENT_STATE::schedule_cpus() {
    RESULT* rp;
    PROJECT* p;
    ACTIVE_TASK* atp;
    double expected_pay_off;
    unsigned int i;
    double rrs = runnable_resource_share();

    // do round-robin simulation to find which results are projected
    // to miss their deadlines
    //
    rr_simulation();
    if (log_flags.cpu_sched_debug) {
        print_deadline_misses();
    }

    // set temporary variables
    //
    for (i=0; i<results.size(); i++) {
        rp = results[i];
        rp->already_selected = false;
    }
    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        p->next_runnable_result = NULL;
        p->nactive_uploads = 0;
        p->anticipated_debt = p->short_term_debt;
        p->deadlines_missed = p->rr_sim_deadlines_missed;
    }
    for (i=0; i<file_xfers->file_xfers.size(); i++) {
        FILE_XFER* fxp = file_xfers->file_xfers[i];
        if (fxp->is_upload) {
            fxp->fip->project->nactive_uploads++;
        }
    }

    adjust_debts();

    expected_pay_off = gstate.global_prefs.cpu_scheduling_period_minutes * 60;
    ordered_scheduled_results.clear();
    double ram_left = available_ram();

    // First choose results from projects with P.deadlines_missed>0
    //
    while ((int)ordered_scheduled_results.size() < ncpus) {
        rp = earliest_deadline_result();
        if (!rp) break;
        rp->already_selected = true;

        // see if it fits in available RAM
        //
        atp = lookup_active_task_by_result(rp);
        if (atp) {
            if (atp->procinfo.working_set_size_smoothed > ram_left) {
                if (log_flags.cpu_sched_debug) {
                    msg_printf(rp->project, MSG_INFO,
                        "[cpu_sched_debug] %s misses deadline but too large: %.2fMB",
                        rp->name, atp->procinfo.working_set_size_smoothed/MEGA
                    );
                }
                continue;
            }
            ram_left -= atp->procinfo.working_set_size_smoothed;
        }

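        // Charge the project's anticipated debt for a scheduling period's
        // worth of CPU, net of the share it would have earned anyway.
        //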
        rp->project->anticipated_debt -= (1 - rp->project->resource_share / rrs) * expected_pay_off;
        rp->project->deadlines_missed--;
        if (log_flags.cpu_sched_debug) {
            msg_printf(rp->project, MSG_INFO,
                "[cpu_sched_debug] scheduling (deadline) %s",
                rp->name
            );
        }
        ordered_scheduled_results.push_back(rp);
    }

    // Next, choose results from projects with large debt
    //
    while ((int)ordered_scheduled_results.size() < ncpus) {
        assign_results_to_projects();
        rp = largest_debt_project_best_result();
        if (!rp) break;
        atp = lookup_active_task_by_result(rp);
        if (atp) {
            if (atp->procinfo.working_set_size_smoothed > ram_left) {
                if (log_flags.cpu_sched_debug) {
                    msg_printf(NULL, MSG_INFO,
                        "[cpu_sched_debug] %s too large: %.2fMB",
                        rp->name, atp->procinfo.working_set_size_smoothed/MEGA
                    );
                }
                continue;
            }
            ram_left -= atp->procinfo.working_set_size_smoothed;
        }
        rp->project->anticipated_debt -= (1 - rp->project->resource_share / rrs) * expected_pay_off;
        if (log_flags.cpu_sched_debug) {
            msg_printf(NULL, MSG_INFO, "[cpu_sched_debug] scheduling (regular) %s", rp->name);
        }
        ordered_scheduled_results.push_back(rp);
    }

    request_enforce_schedule("schedule_cpus");
    set_client_state_dirty("schedule_cpus");
}

// make a list of preemptable tasks, ordered by their preemptability.
//
void CLIENT_STATE::make_running_task_heap(
    vector<ACTIVE_TASK*> &running_tasks
) {
    unsigned int i;
    ACTIVE_TASK* atp;

    for (i=0; i<active_tasks.active_tasks.size(); i++) {
        atp = active_tasks.active_tasks[i];
        if (atp->result->project->non_cpu_intensive) continue;
        if (!atp->result->runnable()) continue;
        if (atp->task_state != PROCESS_EXECUTING) continue;
        running_tasks.push_back(atp);
    }

    std::make_heap(
        running_tasks.begin(),
        running_tasks.end(),
        more_preemptable
    );
}

// Enforce the CPU schedule.
// Inputs:
//   ordered_scheduled_results
//      List of tasks that should (ideally) run, set by schedule_cpus().
//      Most important tasks (e.g. early deadline) are first
// Method:
//   Make a list "running_tasks" of currently running tasks
//   Most preemptable tasks are first in list.
// Details:
//   Initially, each task's scheduler_state is PREEMPTED or SCHEDULED
//     depending on whether or not it is running.
//     This function sets each task's next_scheduler_state,
//     and at the end it starts/resumes and preempts tasks
//     based on scheduler_state and next_scheduler_state.
//
bool CLIENT_STATE::enforce_schedule() {
    unsigned int i;
    ACTIVE_TASK* atp;
    vector<ACTIVE_TASK*> running_tasks;

    // Do this only when requested, not once per second.
    //
    if (!must_enforce_cpu_schedule) return false;
    must_enforce_cpu_schedule = false;
    bool action = false;

    if (log_flags.cpu_sched_debug) {
        msg_printf(0, MSG_INFO, "[cpu_sched_debug] Enforcing schedule");
    }

    // set temporary variables
    //
    for (i=0; i<projects.size(); i++){
        projects[i]->deadlines_missed = projects[i]->rr_sim_deadlines_missed;
    }
    for (i=0; i< active_tasks.active_tasks.size(); i++) {
        atp = active_tasks.active_tasks[i];
        atp->too_large = false;
        if (atp->result->runnable()) {
            atp->next_scheduler_state = atp->scheduler_state;
        } else {
            atp->next_scheduler_state = CPU_SCHED_PREEMPTED;
        }
    }

    // make list of currently running tasks
    //
    make_running_task_heap(running_tasks);

    // if there are more running tasks than ncpus,
    // then mark the extras for preemption
    //
    while (running_tasks.size() > (unsigned int)ncpus) {
        running_tasks[0]->next_scheduler_state = CPU_SCHED_PREEMPTED;
        std::pop_heap(
            running_tasks.begin(),
            running_tasks.end(),
            more_preemptable
        );
        running_tasks.pop_back();
    }

    double ram_left = available_ram();

    if (log_flags.mem_usage_debug) {
        msg_printf(0, MSG_INFO,
            "[mem_usage_debug] enforce: available RAM %.2fMB",
            ram_left/MEGA
        );
    }

    // keep track of how many tasks we plan on running
    // (i.e. have next_scheduler_state = SCHEDULED)
    //
    int nrunning = (int)running_tasks.size();

    // Loop through the scheduled results
    // to see if they should preempt a running task
    //
    for (i=0; i<ordered_scheduled_results.size(); i++) {
        RESULT* rp = ordered_scheduled_results[i];

        // See if the result is already running.
        //
        atp = NULL;
        for (vector<ACTIVE_TASK*>::iterator it = running_tasks.begin(); it != running_tasks.end(); it++) {
            ACTIVE_TASK *atp1 = *it;
            if (atp1 && atp1->result == rp) {
                // The task is already running; remove it from the heap
                //
                atp = atp1;
                it = running_tasks.erase(it);
                std::make_heap(
                    running_tasks.begin(),
                    running_tasks.end(),
                    more_preemptable
                );
                break;
            }
        }
        if (atp) {
            // the scheduled result is already running.
            // see if it fits in mem
            //
            if (atp->procinfo.working_set_size_smoothed > ram_left) {
                atp->next_scheduler_state = CPU_SCHED_PREEMPTED;
                atp->too_large = true;
                nrunning--;
                if (log_flags.mem_usage_debug) {
                    msg_printf(rp->project, MSG_INFO,
                        "[mem_usage_debug] enforce: result %s can't continue, too big %.2fMB > %.2fMB",
                        rp->name, atp->procinfo.working_set_size_smoothed/MEGA, ram_left/MEGA
                    );
                }
            } else {
                ram_left -= atp->procinfo.working_set_size_smoothed;
                continue;
            }
        }

        // if the result already has a (non-running) active task,
        // see if it fits in mem
        //
        atp = lookup_active_task_by_result(rp);
        if (atp) {
            if (atp->procinfo.working_set_size_smoothed > ram_left) {
                atp->too_large = true;
                if (log_flags.mem_usage_debug) {
                    msg_printf(rp->project, MSG_INFO,
                        "[mem_usage_debug] enforce: result %s can't start, too big %.2fMB > %.2fMB",
                        rp->name, atp->procinfo.working_set_size_smoothed/MEGA, ram_left/MEGA
                    );
                }
                continue;
            }
        }

        // The scheduled result is not already running.
        // Preempt something if needed and possible.
        //
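        // When all CPU slots are already committed, a running task is
        // preempted in favor of this result only if the result's project
        // has projected deadline misses, or if the running task has run
        // longer than a full scheduling period and has checkpointed
        // within the last 10 seconds (so little work is lost).
        //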
        bool run_task = false;
        bool need_to_preempt = (nrunning==ncpus) && running_tasks.size();
            // the 2nd half of the above is redundant
        if (need_to_preempt) {
            atp = running_tasks[0];
            bool running_beyond_sched_period =
                gstate.now - atp->run_interval_start_wall_time
                > gstate.global_prefs.cpu_scheduling_period_minutes*60;
            bool checkpointed_recently =
                (now - atp->checkpoint_wall_time < 10);
            if (rp->project->deadlines_missed
                || (running_beyond_sched_period && checkpointed_recently)
            ) {
                // only deadlines_missed results from a project
                // will qualify for immediate enforcement.
                //
                if (rp->project->deadlines_missed) {
                    rp->project->deadlines_missed--;
                }
                atp->next_scheduler_state = CPU_SCHED_PREEMPTED;
                nrunning--;
                std::pop_heap(
                    running_tasks.begin(),
                    running_tasks.end(),
                    more_preemptable
                );
                running_tasks.pop_back();
                run_task = true;
            }
        } else {
            run_task = true;
        }
        if (run_task) {
            atp = get_task(rp);
            atp->next_scheduler_state = CPU_SCHED_SCHEDULED;
            nrunning++;
            ram_left -= atp->procinfo.working_set_size_smoothed;
        }
    }

    // make sure we don't exceed RAM limits
    //
    for (i=0; i<running_tasks.size(); i++) {
        atp = running_tasks[i];
        if (atp->procinfo.working_set_size_smoothed > ram_left) {
            atp->next_scheduler_state = CPU_SCHED_PREEMPTED;
            atp->too_large = true;
            if (log_flags.mem_usage_debug) {
                msg_printf(atp->result->project, MSG_INFO,
                    "[mem_usage_debug] enforce: result %s can't keep, too big %.2fMB > %.2fMB",
                    atp->result->name, atp->procinfo.working_set_size_smoothed/MEGA, ram_left/MEGA
                );
            }
        } else {
            ram_left -= atp->procinfo.working_set_size_smoothed;
        }
    }

    if (log_flags.cpu_sched_debug && nrunning < ncpus) {
        msg_printf(0, MSG_INFO, "[cpu_sched_debug] Some CPUs idle (%d<%d)",
            nrunning, ncpus
        );
        request_work_fetch("CPUs idle");
    }
    if (log_flags.cpu_sched_debug && nrunning > ncpus) {
        msg_printf(0, MSG_INFO, "[cpu_sched_debug] Too many tasks started (%d>%d)",
            nrunning, ncpus
        );
    }

    // schedule new non-CPU-intensive tasks
    //
    for (i=0; i<results.size(); i++) {
        RESULT* rp = results[i];
        if (rp->project->non_cpu_intensive && rp->runnable()) {
            atp = get_task(rp);
            atp->next_scheduler_state = CPU_SCHED_SCHEDULED;
        }
    }

    double swap_left = (global_prefs.vm_max_used_frac)*host_info.m_swap;

    // preempt and start tasks as needed
    //
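    // For SCHEDULED -> PREEMPTED transitions, the task is either suspended
    // in memory or told to quit (its next episode then resumes from the
    // last checkpoint); for transitions to SCHEDULED, the process is
    // resumed or started.
    //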
    for (i=0; i<active_tasks.active_tasks.size(); i++) {
        atp = active_tasks.active_tasks[i];
        if (atp->scheduler_state == CPU_SCHED_SCHEDULED
            && atp->next_scheduler_state == CPU_SCHED_PREEMPTED
        ) {
            action = true;
            bool preempt_by_quit = !global_prefs.leave_apps_in_memory;
            if (swap_left < 0) {
                if (log_flags.mem_usage_debug) {
                    msg_printf(atp->result->project, MSG_INFO,
                        "[mem_usage_debug] out of swap space, will preempt by quit"
                    );
                }
                preempt_by_quit = true;
            }

            atp->preempt(preempt_by_quit);
            atp->scheduler_state = CPU_SCHED_PREEMPTED;
        } else if (atp->scheduler_state != CPU_SCHED_SCHEDULED
            && atp->next_scheduler_state == CPU_SCHED_SCHEDULED
        ) {
            action = true;
            int retval = atp->resume_or_start();
            if (retval) {
                report_result_error(
                    *(atp->result), "Couldn't start or resume: %d", retval
                );
                request_schedule_cpus("start failed");
                continue;
            }
            atp->scheduler_state = CPU_SCHED_SCHEDULED;
            atp->run_interval_start_wall_time = now;
            app_started = gstate.now;
            swap_left -= atp->procinfo.swap_size;
        }
    }
    if (action) {
        set_client_state_dirty("enforce_cpu_schedule");
    }
    return action;
}

// return true if we don't have enough runnable tasks to keep all CPUs busy
//
bool CLIENT_STATE::no_work_for_a_cpu() {
    unsigned int i;
    int count = 0;

    for (i=0; i< results.size(); i++){
        RESULT* rp = results[i];
        if (!rp->nearly_runnable()) continue;
        if (rp->project->non_cpu_intensive) continue;
        count++;
    }
    return ncpus > count;
}

// Set the project's rrsim_proc_rate:
// the fraction of each CPU that it will get in round-robin mode.
// Precondition: the project's "active" array is populated
//
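// Illustrative numbers: with resource_share 100 and rrs 400 on a 2-CPU
// host where the project has one active result, x = (100/400) * (2/1) = 0.5,
// so the project is simulated at half a CPU (times overall_cpu_frac()).
//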
void PROJECT::set_rrsim_proc_rate(double rrs) {
    int nactive = (int)active.size();
    if (nactive == 0) return;
    double x;

    if (rrs) {
        x = resource_share/rrs;
    } else {
        x = 1;      // pathological case; maybe should be 1/# runnable projects
    }

    // if this project has fewer active results than CPUs,
    // scale up its share to reflect this
    //
    if (nactive < gstate.ncpus) {
        x *= ((double)gstate.ncpus)/nactive;
    }

    // But its rate on a given CPU can't exceed 1
    //
    if (x>1) {
        x = 1;
    }
    rrsim_proc_rate = x*gstate.overall_cpu_frac();
    if (log_flags.rr_simulation) {
        msg_printf(this, MSG_INFO,
            "[rr_sim] set_rrsim_proc_rate: %f (rrs %f, rs %f, nactive %d, ocf %f)",
            rrsim_proc_rate, rrs, resource_share, nactive, gstate.overall_cpu_frac()
        );
    }
}

// Do a simulation of weighted round-robin scheduling.
//
// Inputs:
// per_cpu_proc_rate:
//   the expected number of CPU seconds per wall second on each CPU
// rrs:
//   the total resource share of relevant projects
//   (runnable when called from CPU sched,
//   nearly runnable when called from work fetch)
//   NOTE: this may be zero, e.g. if no projects have results
//
// Outputs (changes to global state):
// For each project p:
//   p->rr_sim_deadlines_missed
//   p->cpu_shortfall
// For each result r:
//   r->rr_sim_misses_deadline
//   r->last_rr_sim_missed_deadline
// gstate.cpu_shortfall
//
// NOTE: deadline misses are not counted for tasks
// that are too large to run in RAM right now.
//
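// The simulation is event-driven: at each step, find the active result that
// finishes first at the current per-project rates, advance the simulated
// clock by that amount, deduct the elapsed CPU from the other active
// results, and promote a pending result from the same project if any.
//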
void CLIENT_STATE::rr_simulation() {
    double rrs = nearly_runnable_resource_share();
    double trs = total_resource_share();
    PROJECT* p, *pbest;
    RESULT* rp, *rpbest;
    vector<RESULT*> active;
    unsigned int i;
    double x;
    vector<RESULT*>::iterator it;

    double ar = available_ram();

    if (log_flags.rr_simulation) {
        msg_printf(0, MSG_INFO,
            "[rr_sim] rr_sim start: work_buf_min %f rrs %f trs %f",
            work_buf_min(), rrs, trs
        );
    }

    // Initialize result lists for each project:
    //   "active" is what's currently running (in the simulation)
    //   "pending" is what's queued
    //
    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        p->active.clear();
        p->pending.clear();
        p->rr_sim_deadlines_missed = 0;
        p->cpu_shortfall = 0;
    }

    for (i=0; i<results.size(); i++) {
        rp = results[i];
        if (!rp->nearly_runnable()) continue;
        if (rp->project->non_cpu_intensive) continue;
        rp->rrsim_cpu_left = rp->estimated_cpu_time_remaining();
        p = rp->project;
        if (p->active.size() < (unsigned int)ncpus) {
            active.push_back(rp);
            p->active.push_back(rp);
        } else {
            p->pending.push_back(rp);
        }
        rp->last_rr_sim_missed_deadline = rp->rr_sim_misses_deadline;
        rp->rr_sim_misses_deadline = false;
    }

    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        p->set_rrsim_proc_rate(rrs);
        // if there are no results for a project,
        // the shortfall is its entire share.
        //
        if (!p->active.size()) {
            double rsf = trs ? p->resource_share/trs : 1;
            p->cpu_shortfall = work_buf_min() * overall_cpu_frac() * ncpus * rsf;
            if (log_flags.rr_simulation) {
                msg_printf(p, MSG_INFO,
                    "[rr_sim] no results; shortfall %f wbm %f ocf %f rsf %f",
                    p->cpu_shortfall, work_buf_min(), overall_cpu_frac(), rsf
                );
            }
        }
    }

    double buf_end = now + work_buf_min();

    // Simulation loop.  Keep going until work done
    //
    double sim_now = now;
    cpu_shortfall = 0;
    while (active.size()) {

        // compute finish times and see which result finishes first
        //
        rpbest = NULL;
        for (i=0; i<active.size(); i++) {
            rp = active[i];
            p = rp->project;
            rp->rrsim_finish_delay = rp->rrsim_cpu_left/p->rrsim_proc_rate;
            if (!rpbest || rp->rrsim_finish_delay < rpbest->rrsim_finish_delay) {
                rpbest = rp;
            }
        }

        pbest = rpbest->project;

        if (log_flags.rr_simulation) {
            msg_printf(pbest, MSG_INFO,
                "[rr_sim] result %s finishes after %f (%f/%f)",
                rpbest->name, rpbest->rrsim_finish_delay, rpbest->rrsim_cpu_left, pbest->rrsim_proc_rate
            );
        }

        // "rpbest" is first result to finish.  Does it miss its deadline?
        //
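        // diff > 0 means the projected finish time is later than the
        // deadline after tightening it by CPU_PESSIMISM_FACTOR
        // (i.e. misses are predicted a little earlier than strictly needed).
        //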
        double diff = sim_now + rpbest->rrsim_finish_delay - ((rpbest->computation_deadline()-now)*CPU_PESSIMISM_FACTOR + now);
        if (diff > 0) {
            ACTIVE_TASK* atp = lookup_active_task_by_result(rpbest);
            if (atp && atp->procinfo.working_set_size_smoothed > ar) {
                if (log_flags.rr_simulation) {
                    msg_printf(pbest, MSG_INFO,
                        "[rr_sim] result %s misses deadline but too large to run",
                        rpbest->name
                    );
                }
            } else {
                rpbest->rr_sim_misses_deadline = true;
                pbest->rr_sim_deadlines_missed++;
                if (log_flags.rr_simulation) {
                    msg_printf(pbest, MSG_INFO,
                        "[rr_sim] result %s misses deadline by %f",
                        rpbest->name, diff
                    );
                }
            }
        }

        int last_active_size = (int)active.size();
        int last_proj_active_size = (int)pbest->active.size();

        // remove *rpbest from active set,
        // and adjust CPU time left for other results
        //
        it = active.begin();
        while (it != active.end()) {
            rp = *it;
            if (rp == rpbest) {
                it = active.erase(it);
            } else {
                x = rp->project->rrsim_proc_rate*rpbest->rrsim_finish_delay;
                rp->rrsim_cpu_left -= x;
                it++;
            }
        }

        // remove *rpbest from its project's active set
        //
        it = pbest->active.begin();
        while (it != pbest->active.end()) {
            rp = *it;
            if (rp == rpbest) {
                it = pbest->active.erase(it);
            } else {
                it++;
            }
        }

        // If project has more results, add one to active set.
        //
        if (pbest->pending.size()) {
            rp = pbest->pending[0];
            pbest->pending.erase(pbest->pending.begin());
            active.push_back(rp);
            pbest->active.push_back(rp);
        }

        // If all work done for a project, subtract that project's share
        // and recompute processing rates
        //
        if (pbest->active.size() == 0) {
            rrs -= pbest->resource_share;
            if (log_flags.rr_simulation) {
                msg_printf(pbest, MSG_INFO,
                    "[rr_sim] decr rrs by %f, new value %f",
                    pbest->resource_share, rrs
                );
            }
            for (i=0; i<projects.size(); i++) {
                p = projects[i];
                p->set_rrsim_proc_rate(rrs);
            }
        }

        // increment CPU shortfalls if necessary
        //
        if (sim_now < buf_end) {
            double end_time = sim_now + rpbest->rrsim_finish_delay;
            if (end_time > buf_end) end_time = buf_end;
            double d_time = end_time - sim_now;
            int nidle_cpus = ncpus - last_active_size;
            if (nidle_cpus<0) nidle_cpus = 0;
            if (nidle_cpus > 0) cpu_shortfall += d_time*nidle_cpus;

            double rsf = trs?pbest->resource_share/trs:1;
            double proj_cpu_share = ncpus*rsf;

            if (last_proj_active_size < proj_cpu_share) {
                pbest->cpu_shortfall += d_time*(proj_cpu_share - last_proj_active_size);
                if (log_flags.rr_simulation) {
                    msg_printf(pbest, MSG_INFO,
                        "[rr_sim] new shortfall %f d_time %f proj_cpu_share %f lpas %d",
                        pbest->cpu_shortfall, d_time, proj_cpu_share, last_proj_active_size
                    );
                }
            }

            if (end_time < buf_end) {
                d_time = buf_end - end_time;
                // if this is the last result for this project, account for the tail
                if (!pbest->active.size()) {
                    pbest->cpu_shortfall += d_time * proj_cpu_share;
                    if (log_flags.rr_simulation) {
                        msg_printf(pbest, MSG_INFO, "[rr_sim] proj out of work; shortfall %f d %f pcs %f",
                            pbest->cpu_shortfall, d_time, proj_cpu_share
                        );
                    }
                }
            }
            if (log_flags.rr_simulation) {
                msg_printf(0, MSG_INFO,
                    "[rr_sim] total: idle cpus %d, last active %d, active %d, shortfall %f",
                    nidle_cpus, last_active_size, (int)active.size(), cpu_shortfall
                );
                msg_printf(0, MSG_INFO,
                    "rr_sim proj %s: last active %d, active %d, shortfall %f",
                    pbest->get_project_name(), last_proj_active_size, (int)pbest->active.size(),
                    pbest->cpu_shortfall
                );
            }
        }

        sim_now += rpbest->rrsim_finish_delay;
    }

    if (sim_now < buf_end) {
        cpu_shortfall += (buf_end - sim_now) * ncpus;
    }

    if (log_flags.rr_simulation) {
        for (i=0; i<projects.size(); i++) {
            p = projects[i];
            if (p->cpu_shortfall) {
                msg_printf(p, MSG_INFO,
                    "[rr_sim] shortfall %f\n", p->cpu_shortfall
                );
            }
        }
        msg_printf(NULL, MSG_INFO,
            "rr_simulation: end; total shortfall %f\n",
            cpu_shortfall
        );
    }
}

// trigger CPU schedule enforcement.
// Called when a new schedule is computed,
// and when an app checkpoints.
//
void CLIENT_STATE::request_enforce_schedule(const char* where) {
    if (log_flags.cpu_sched_debug) {
        msg_printf(0, MSG_INFO, "[cpu_sched_debug] Request enforce CPU schedule: %s", where);
    }
    must_enforce_cpu_schedule = true;
}

// trigger CPU scheduling.
// Called when a result is completed,
// when new results become runnable,
// or when the user performs a UI interaction
// (e.g. suspending or resuming a project or result).
//
void CLIENT_STATE::request_schedule_cpus(const char* where) {
    if (log_flags.cpu_sched_debug) {
        msg_printf(0, MSG_INFO, "[cpu_sched_debug] Request CPU reschedule: %s", where);
    }
    must_schedule_cpus = true;
}

const char *BOINC_RCSID_e830ee1 = "$Id$";