- client (Unix): if app uses < 1 CPU, run at nice 10 (not 0)

- client: suppress specious error message

svn path=/trunk/boinc/; revision=16496
David Anderson 2008-11-14 22:08:50 +00:00
parent 553f708f6c
commit 98d6931d63
4 changed files with 438 additions and 422 deletions

checkin_notes

@@ -9438,3 +9438,13 @@ Rom 10 Nov 2008
    /
        configure.ac
        version.h
+
+David 14 Nov 2008
+    - client (Unix): if app uses < 1 CPU, run at nice 10 (not 0)
+    - client: suppress specious error message
+
+    client/
+        app_start.cpp
+        rr_sim.cpp
+    lib/
+        util.h
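
The notes above describe the policy change: a task that uses less than one full CPU (typically a coprocessor app) was previously left at the default nice 0, and now runs at nice 10. A minimal sketch of that policy follows; the avg_ncpus parameter and set_task_priority() helper are hypothetical stand-ins, since the derivation of the high_priority flag is not part of the diff shown below.

// Sketch only: illustrates the checkin-note policy, not actual BOINC code.
// Assumption: "uses < 1 CPU" is modeled here as avg_ncpus < 1.
#include <sys/resource.h>
#include <unistd.h>
#include <cstdio>

static const int PROCESS_IDLE_PRIORITY = 19;    // nice value for ordinary CPU apps
static const int PROCESS_MEDIUM_PRIORITY = 10;  // nice value for apps using < 1 CPU

int set_task_priority(int pid, double avg_ncpus) {
    bool high_priority = (avg_ncpus < 1);   // e.g. a coprocessor app
    if (setpriority(PRIO_PROCESS, pid,
        high_priority?PROCESS_MEDIUM_PRIORITY:PROCESS_IDLE_PRIORITY)
    ) {
        perror("setpriority");
        return -1;
    }
    return 0;
}

int main() {
    // Apply the policy to the current process as a demonstration.
    return set_task_priority(getpid(), 0.5);
}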

client/app_start.cpp

@@ -687,10 +687,10 @@ int ACTIVE_TASK::start(bool first_time) {
            );
        }
-        if (!high_priority) {
-            if (setpriority(PRIO_PROCESS, pid, PROCESS_IDLE_PRIORITY)) {
-                perror("setpriority");
-            }
-        }
+        if (setpriority(PRIO_PROCESS, pid,
+            high_priority?PROCESS_MEDIUM_PRIORITY:PROCESS_IDLE_PRIORITY)
+        ) {
+            perror("setpriority");
+        }
#else
@@ -812,10 +812,10 @@ int ACTIVE_TASK::start(bool first_time) {
    freopen(STDERR_FILE, "a", stderr);
#ifdef HAVE_SETPRIORITY
-    if (!high_priority) {
-        if (setpriority(PRIO_PROCESS, 0, PROCESS_IDLE_PRIORITY)) {
-            perror("setpriority");
-        }
-    }
+    if (setpriority(PRIO_PROCESS, 0,
+        high_priority?PROCESS_MEDIUM_PRIORITY:PROCESS_IDLE_PRIORITY)
+    ) {
+        perror("setpriority");
+    }
#endif
    sprintf(cmdline, "%s %s", wup->command_line.c_str(), app_version->cmdline);
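
The rewritten code above calls setpriority() unconditionally and selects the nice value with a ternary; because setpriority() returns 0 on success and -1 on error, the call can sit directly inside the if. A small standalone demo of that pattern (illustrative, not BOINC code), with a getpriority() read-back:

// Demo of the setpriority()/getpriority() mechanics used in the diff above.
#include <sys/resource.h>
#include <cerrno>
#include <cstdio>

int main() {
    const int PROCESS_IDLE_PRIORITY = 19;
    const int PROCESS_MEDIUM_PRIORITY = 10;
    bool high_priority = false;             // pretend this is a full-CPU app

    // 0 as the second argument means "the calling process".
    if (setpriority(PRIO_PROCESS, 0,
        high_priority?PROCESS_MEDIUM_PRIORITY:PROCESS_IDLE_PRIORITY)
    ) {
        perror("setpriority");
        return 1;
    }

    // Read the value back. getpriority() can legitimately return -1,
    // so errno must be cleared and checked separately.
    errno = 0;
    int nice_val = getpriority(PRIO_PROCESS, 0);
    if (nice_val == -1 && errno) {
        perror("getpriority");
        return 1;
    }
    printf("running at nice %d\n", nice_val);
    return 0;
}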

client/rr_sim.cpp

@@ -1,414 +1,419 @@
// This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC.  If not, see <http://www.gnu.org/licenses/>.

#ifdef _WIN32
#include "boinc_win.h"
#endif

#ifdef SIM
#include "sim.h"
#else
#include "client_state.h"
#endif

#include "client_msgs.h"

struct RR_SIM_STATUS {
    std::vector<RESULT*> active;
    COPROCS coprocs;
    double active_ncpus;

    inline bool can_run(RESULT* rp) {
        return coprocs.sufficient_coprocs(
            rp->avp->coprocs, log_flags.rr_simulation, "rr_simulation"
        );
    }
    inline void activate(RESULT* rp) {
        coprocs.reserve_coprocs(
            rp->avp->coprocs, rp, log_flags.rr_simulation, "rr_simulation"
        );
        active.push_back(rp);
        active_ncpus += rp->avp->avg_ncpus;
    }
    // remove *rpbest from active set,
    // and adjust CPU time left for other results
    //
    inline void remove_active(RESULT* rpbest) {
        coprocs.free_coprocs(rpbest->avp->coprocs, rpbest, log_flags.rr_simulation, "rr_simulation");
        vector<RESULT*>::iterator it = active.begin();
        while (it != active.end()) {
            RESULT* rp = *it;
            if (rp == rpbest) {
                it = active.erase(it);
            } else {
                rp->rrsim_cpu_left -= rp->rrsim_rate*rpbest->rrsim_finish_delay;
-               if (rp->rrsim_cpu_left < 0) {
-                   msg_printf(rp->project, MSG_INTERNAL_ERROR,
-                       "%s: negative CPU time left %f", rp->name, rp->rrsim_cpu_left
-                   );
-                   rp->rrsim_cpu_left = 0;
-               }
+
+               // can be slightly less than 0 due to roundoff
+               //
+               if (rp->rrsim_cpu_left < -1) {
+                   msg_printf(rp->project, MSG_INTERNAL_ERROR,
+                       "%s: negative CPU time left %f", rp->name, rp->rrsim_cpu_left
+                   );
+               }
+               if (rp->rrsim_cpu_left < 0) {
+                   rp->rrsim_cpu_left = 0;
+               }
                it++;
            }
        }
        active_ncpus -= rpbest->avp->avg_ncpus;
    }
#if 0
    inline int nactive() {
        return (int) active.size();
    }
#endif
    RR_SIM_STATUS() {
        active_ncpus = 0;
    }
    ~RR_SIM_STATUS() {
        coprocs.delete_coprocs();
    }
};

void RR_SIM_PROJECT_STATUS::activate(RESULT* rp) {
    active.push_back(rp);
    active_ncpus += rp->avp->avg_ncpus;
}

bool RR_SIM_PROJECT_STATUS::can_run(RESULT* rp, int ncpus) {
    if (rp->uses_coprocs()) return true;
    return active_ncpus < ncpus;
}

void RR_SIM_PROJECT_STATUS::remove_active(RESULT* r) {
    std::vector<RESULT*>::iterator it = active.begin();
    while (it != active.end()) {
        if (*it == r) {
            it = active.erase(it);
        } else {
            it++;
        }
    }
    active_ncpus -= r->avp->avg_ncpus;
}

// Set the project's rrsim_proc_rate:
// the fraction of CPU that it will get in round-robin mode.
//
void PROJECT::set_rrsim_proc_rate(double rrs) {
    double x;

    if (rrs) {
        x = resource_share/rrs;
    } else {
        x = 1;      // pathological case; maybe should be 1/# runnable projects
    }

    rr_sim_status.proc_rate = x*gstate.overall_cpu_frac();
    if (log_flags.rr_simulation) {
        msg_printf(this, MSG_INFO,
            "[rr_sim] set_rrsim_proc_rate: %f (rrs %f, rs %f, ocf %f",
            rr_sim_status.proc_rate, rrs, resource_share, gstate.overall_cpu_frac()
        );
    }
}

void CLIENT_STATE::print_deadline_misses() {
    unsigned int i;
    RESULT* rp;
    PROJECT* p;
    for (i=0; i<results.size(); i++){
        rp = results[i];
        if (rp->rr_sim_misses_deadline && !rp->last_rr_sim_missed_deadline) {
            msg_printf(rp->project, MSG_INFO,
                "[cpu_sched_debug] Result %s projected to miss deadline.", rp->name
            );
        }
        else if (!rp->rr_sim_misses_deadline && rp->last_rr_sim_missed_deadline) {
            msg_printf(rp->project, MSG_INFO,
                "[cpu_sched_debug] Result %s projected to meet deadline.", rp->name
            );
        }
    }
    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        if (p->rr_sim_status.deadlines_missed) {
            msg_printf(p, MSG_INFO,
                "[cpu_sched_debug] Project has %d projected deadline misses",
                p->rr_sim_status.deadlines_missed
            );
        }
    }
}

// Do a simulation of the current workload
// with weighted round-robin (WRR) scheduling.
// Include jobs that are downloading.
//
// For efficiency, we simulate a crude approximation of WRR.
// We don't model time-slicing.
// Instead we use a continuous model where, at a given point,
// each project has a set of running jobs that uses at most all CPUs
// (and obeys coprocessor limits).
// These jobs are assumed to run at a rate proportionate to their avg_ncpus,
// and each project gets CPU proportionate to its RRS.
//
// Outputs are changes to global state:
// For each project p:
//   p->rr_sim_deadlines_missed
//   p->cpu_shortfall
// For each result r:
//   r->rr_sim_misses_deadline
//   r->last_rr_sim_missed_deadline
// gstate.cpu_shortfall
//
// Deadline misses are not counted for tasks
// that are too large to run in RAM right now.
//
void CLIENT_STATE::rr_simulation() {
    double rrs = nearly_runnable_resource_share();
    double trs = total_resource_share();
    PROJECT* p, *pbest;
    RESULT* rp, *rpbest;
    RR_SIM_STATUS sim_status;
    unsigned int i;

    sim_status.coprocs.clone(coprocs, false);
    double ar = available_ram();

    if (log_flags.rr_simulation) {
        msg_printf(0, MSG_INFO,
            "[rr_sim] rr_sim start: work_buf_total %f rrs %f trs %f ncpus %d",
            work_buf_total(), rrs, trs, ncpus
        );
    }

    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        if (p->non_cpu_intensive) continue;
        p->rr_sim_status.clear();
    }

    // Decide what jobs to include in the simulation,
    // and pick the ones that are initially running
    //
    for (i=0; i<results.size(); i++) {
        rp = results[i];
        if (!rp->nearly_runnable()) continue;
        if (rp->some_download_stalled()) continue;
        if (rp->project->non_cpu_intensive) continue;
        rp->rrsim_cpu_left = rp->estimated_cpu_time_remaining(false);
        if (rp->rrsim_cpu_left <= 0) continue;
        p = rp->project;
        if (p->rr_sim_status.can_run(rp, gstate.ncpus) && sim_status.can_run(rp)) {
            sim_status.activate(rp);
            p->rr_sim_status.activate(rp);
        } else {
            p->rr_sim_status.add_pending(rp);
        }
        rp->last_rr_sim_missed_deadline = rp->rr_sim_misses_deadline;
        rp->rr_sim_misses_deadline = false;
    }

    for (i=0; i<projects.size(); i++) {
        p = projects[i];
        if (p->non_cpu_intensive) continue;
        p->set_rrsim_proc_rate(rrs);
        // if there are no results for a project,
        // the shortfall is its entire share.
        //
        if (p->rr_sim_status.none_active()) {
            double rsf = trs ? p->resource_share/trs : 1;
            p->rr_sim_status.cpu_shortfall = work_buf_total() * overall_cpu_frac() * ncpus * rsf;
            if (log_flags.rr_simulation) {
                msg_printf(p, MSG_INFO,
                    "[rr_sim] no results; shortfall %f wbt %f ocf %f rsf %f",
                    p->rr_sim_status.cpu_shortfall, work_buf_total(), overall_cpu_frac(), rsf
                );
            }
        }
    }

    double buf_end = now + work_buf_total();

    // Simulation loop. Keep going until work done
    //
    double sim_now = now;
    cpu_shortfall = 0;
    while (sim_status.active.size()) {

        // compute finish times and see which result finishes first
        //
        rpbest = NULL;
        for (i=0; i<sim_status.active.size(); i++) {
            rp = sim_status.active[i];
            p = rp->project;
            rp->rrsim_rate = p->rr_sim_status.proc_rate;
            if (p->rr_sim_status.active_ncpus < ncpus) {
                rp->rrsim_rate *= (ncpus/p->rr_sim_status.active_ncpus);
            }
            rp->rrsim_rate *= rp->avp->avg_ncpus/p->rr_sim_status.active_ncpus;
            if (rp->rrsim_rate > rp->avp->avg_ncpus * overall_cpu_frac()) {
                rp->rrsim_rate = rp->avp->avg_ncpus * overall_cpu_frac();
            }
            rp->rrsim_finish_delay = rp->rrsim_cpu_left/rp->rrsim_rate;
            if (!rpbest || rp->rrsim_finish_delay < rpbest->rrsim_finish_delay) {
                rpbest = rp;
            }
        }

        pbest = rpbest->project;

        if (log_flags.rr_simulation) {
            msg_printf(pbest, MSG_INFO,
                "[rr_sim] result %s finishes after %f (%f/%f)",
                rpbest->name, rpbest->rrsim_finish_delay,
                rpbest->rrsim_cpu_left, rpbest->rrsim_rate
            );
        }

        // "rpbest" is first result to finish. Does it miss its deadline?
        //
        double diff = sim_now + rpbest->rrsim_finish_delay - ((rpbest->computation_deadline()-now)*CPU_PESSIMISM_FACTOR + now);
        if (diff > 0) {
            ACTIVE_TASK* atp = lookup_active_task_by_result(rpbest);
            if (atp && atp->procinfo.working_set_size_smoothed > ar) {
                if (log_flags.rr_simulation) {
                    msg_printf(pbest, MSG_INFO,
                        "[rr_sim] result %s misses deadline but too large to run",
                        rpbest->name
                    );
                }
            } else {
                rpbest->rr_sim_misses_deadline = true;
                pbest->rr_sim_status.deadlines_missed++;
                if (log_flags.rr_simulation) {
                    msg_printf(pbest, MSG_INFO,
                        "[rr_sim] result %s misses deadline by %f",
                        rpbest->name, diff
                    );
                }
            }
        }

        double last_active_ncpus = sim_status.active_ncpus;
        double last_proj_active_ncpus = pbest->rr_sim_status.active_ncpus;

        sim_status.remove_active(rpbest);
        pbest->rr_sim_status.remove_active(rpbest);

        // If project has more results, add one or more to active set.
        //
        while (1) {
            rp = pbest->rr_sim_status.get_pending();
            if (!rp) break;
            if (pbest->rr_sim_status.can_run(rp, gstate.ncpus) && sim_status.can_run(rp)) {
                sim_status.activate(rp);
                pbest->rr_sim_status.activate(rp);
            } else {
                pbest->rr_sim_status.add_pending(rp);
                break;
            }
        }

        // If all work done for a project, subtract that project's share
        // and recompute processing rates
        //
        if (pbest->rr_sim_status.none_active()) {
            rrs -= pbest->resource_share;
            if (log_flags.rr_simulation) {
                msg_printf(pbest, MSG_INFO,
                    "[rr_sim] decr rrs by %f, new value %f",
                    pbest->resource_share, rrs
                );
            }
            for (i=0; i<projects.size(); i++) {
                p = projects[i];
                if (p->non_cpu_intensive) continue;
                p->set_rrsim_proc_rate(rrs);
            }
        }

        // increment CPU shortfalls if necessary
        //
        if (sim_now < buf_end) {
            double end_time = sim_now + rpbest->rrsim_finish_delay;
            if (end_time > buf_end) end_time = buf_end;
            double d_time = end_time - sim_now;
            double nidle_cpus = ncpus - last_active_ncpus;
            if (nidle_cpus<0) nidle_cpus = 0;
            if (nidle_cpus > 0) cpu_shortfall += d_time*nidle_cpus;

            double rsf = trs?pbest->resource_share/trs:1;
            double proj_cpu_share = ncpus*rsf;

            if (last_proj_active_ncpus < proj_cpu_share) {
                pbest->rr_sim_status.cpu_shortfall += d_time*(proj_cpu_share - last_proj_active_ncpus);
                if (log_flags.rr_simulation) {
                    msg_printf(pbest, MSG_INFO,
                        "[rr_sim] new shortfall %f d_time %f proj_cpu_share %f lpan %f",
                        pbest->rr_sim_status.cpu_shortfall, d_time, proj_cpu_share, last_proj_active_ncpus
                    );
                }
            }

            if (end_time < buf_end) {
                d_time = buf_end - end_time;
                // if this is the last result for this project, account for the tail
                if (pbest->rr_sim_status.none_active()) {
                    pbest->rr_sim_status.cpu_shortfall += d_time * proj_cpu_share;
                    if (log_flags.rr_simulation) {
                        msg_printf(pbest, MSG_INFO, "[rr_sim] proj out of work; shortfall %f d %f pcs %f",
                            pbest->rr_sim_status.cpu_shortfall, d_time, proj_cpu_share
                        );
                    }
                }
            }
            if (log_flags.rr_simulation) {
                msg_printf(0, MSG_INFO,
                    "[rr_sim] total: idle cpus %f, last active %f, active %f, shortfall %f",
                    nidle_cpus, last_active_ncpus, sim_status.active_ncpus,
                    cpu_shortfall
                );
                msg_printf(pbest, MSG_INFO,
                    "[rr_sim] %s: last active %f, active %f, shortfall %f",
                    pbest->get_project_name(), last_proj_active_ncpus,
                    pbest->rr_sim_status.active_ncpus,
                    pbest->rr_sim_status.cpu_shortfall
                );
            }
        }

        sim_now += rpbest->rrsim_finish_delay;
    }

    if (sim_now < buf_end) {
        cpu_shortfall += (buf_end - sim_now) * ncpus;
    }

    if (log_flags.rr_simulation) {
        for (i=0; i<projects.size(); i++) {
            p = projects[i];
            if (p->non_cpu_intensive) continue;
            if (p->rr_sim_status.cpu_shortfall) {
                msg_printf(p, MSG_INFO,
                    "[rr_sim] shortfall %f\n", p->rr_sim_status.cpu_shortfall
                );
            }
        }
        msg_printf(NULL, MSG_INFO,
            "[rr_sim] done; total shortfall %f\n",
            cpu_shortfall
        );
    }
}
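
The only functional change in this file is in RR_SIM_STATUS::remove_active(): a remaining CPU time that is only slightly negative is now treated as floating-point roundoff and silently clamped, and the internal-error message is reserved for deficits below -1 (about a second of CPU time). A generic sketch of that tolerance-and-clamp pattern (illustrative, not BOINC code):

// Small negative remainders are expected roundoff and are clamped quietly;
// only values beyond the tolerance are reported as real inconsistencies.
#include <cstdio>

// Returns the clamped remaining time; "tolerance" is in the same units
// as the value (seconds here, matching rrsim_cpu_left).
double clamp_remaining(double remaining, double tolerance = 1.0) {
    if (remaining < -tolerance) {
        // A deficit this large is not explainable by roundoff.
        fprintf(stderr, "negative CPU time left %f\n", remaining);
    }
    if (remaining < 0) {
        remaining = 0;      // clamp roundoff (and the error case) to zero
    }
    return remaining;
}

int main() {
    printf("%f\n", clamp_remaining(-1e-9));   // silent: just roundoff
    printf("%f\n", clamp_remaining(-5.0));    // reported: real inconsistency
    return 0;
}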

lib/util.h

@@ -58,6 +58,7 @@ extern int boinc_process_cpu_time(double& cpu);
// (don't use 20 because
//
static const int PROCESS_IDLE_PRIORITY = 19;
+static const int PROCESS_MEDIUM_PRIORITY = 10;
extern double linux_cpu_time(int pid);
#endif