- client: show "est. delay" correctly in work fetch debug msgs

- client: show times correctly in rr_sim debug msgs
	- client: in "requesting new tasks" msg,
		say what resources we're requesting (if there's more than CPU)
	- client: estimated delay was possibly being calculated incorrectly
		because of roundoff error

svn path=/trunk/boinc/; revision=18269
This commit is contained in:
David Anderson 2009-06-02 22:53:57 +00:00
parent 7555047cac
commit c2097091fe
5 changed files with 44 additions and 9 deletions

View File

@ -4929,3 +4929,17 @@ David 1 June 2009
sched/
assimilator.py
testasm.py
David 2 June 2009
- client: show "est. delay" correctly in work fetch debug msgs
- client: show times correctly in rr_sim debug msgs
- client: in "requesting new tasks" msg,
say what resources we're requesting (if there's more than CPU)
- client: estimated delay was possibly being calculated incorrectly
because of roundoff error
client/
cs_scheduler.cpp
rr_sim.cpp
scheduler_op.cpp
work_fetch.cpp

View File

@ -230,7 +230,7 @@ int CLIENT_STATE::make_scheduler_request(PROJECT* p) {
if (coproc_cuda) {
coproc_cuda->req_secs = cuda_work_fetch.req_secs;
coproc_cuda->req_instances = cuda_work_fetch.req_instances;
coproc_cuda->estimated_delay = cuda_work_fetch.estimated_delay;
coproc_cuda->estimated_delay = cuda_work_fetch.req_secs?cuda_work_fetch.estimated_delay:0;
}
if (coprocs.coprocs.size()) {

View File

@ -349,6 +349,8 @@ void CLIENT_STATE::rr_simulation() {
sim_status.remove_active(rpbest);
pbest->rr_sim_status.remove_active(rpbest);
sim_now += rpbest->rrsim_finish_delay;
// start new jobs; may need to start more than one
// if this job used multiple resource instances
//
@ -370,7 +372,6 @@ void CLIENT_STATE::rr_simulation() {
pbest->rr_sim_status.activate(rp);
}
}
sim_now += rpbest->rrsim_finish_delay;
}
// if simulation ends before end of buffer, take the tail into account

View File

@ -209,7 +209,7 @@ void SCHEDULER_OP::rpc_failed(const char* msg) {
//
int SCHEDULER_OP::start_rpc(PROJECT* p) {
int retval;
char request_file[1024], reply_file[1024];
char request_file[1024], reply_file[1024], buf[256];
safe_strcpy(scheduler_url, p->get_scheduler_url(url_index, url_random));
if (log_flags.sched_ops) {
@ -217,13 +217,24 @@ int SCHEDULER_OP::start_rpc(PROJECT* p) {
"Sending scheduler request: %s.", rpc_reason_string(reason)
);
if (cpu_work_fetch.req_secs || cuda_work_fetch.req_secs) {
if (coproc_cuda) {
if (cpu_work_fetch.req_secs && cuda_work_fetch.req_secs) {
sprintf(buf, " for CPU and GPU");
} else if (cpu_work_fetch.req_secs) {
sprintf(buf, " for CPU");
} else {
sprintf(buf, " for GPU");
}
} else {
strcpy(buf, "");
}
if (p->nresults_returned) {
msg_printf(p, MSG_INFO,
"Reporting %d completed tasks, requesting new tasks",
p->nresults_returned
"Reporting %d completed tasks, requesting new tasks%s",
p->nresults_returned, buf
);
} else {
msg_printf(p, MSG_INFO, "Requesting new tasks");
msg_printf(p, MSG_INFO, "Requesting new tasks%s", buf);
}
} else {
if (p->nresults_returned) {

View File

@ -124,15 +124,25 @@ void RSC_WORK_FETCH::accumulate_shortfall(double d_time) {
if (idle > 0) {
shortfall += idle*d_time;
}
#if 0
msg_printf(0, MSG_INFO, "accum shortf (%s): idle %f dt %f sf %f",
rsc_name(rsc_type), idle, d_time, shortfall
);
#endif
}
// "estimated delay" is the interval for which we expect the
// resource to be saturated.
//
void RSC_WORK_FETCH::update_estimated_delay(double dt) {
    // Compare with a small epsilon: sim_nused accumulates fractional
    // instance usage during the simulation, so roundoff error could
    // make a fully saturated resource appear (barely) idle and cause
    // estimated_delay to be computed incorrectly.
    if (sim_nused+1e-6 >= ninstances) {
        estimated_delay = dt;
    }
#if 0
    msg_printf(0, MSG_INFO, "est delay (%s): used %e instances %d dt %f est delay %f",
        rsc_name(rsc_type), sim_nused, ninstances, dt, estimated_delay
    );
#endif
}
// see if the project's debt is beyond what would normally happen;
@ -340,7 +350,6 @@ static void print_req(PROJECT* p) {
// Reset this resource's work request.
//
// NOTE(review): estimated_delay is deliberately NOT cleared here.
// Per this commit's hunk header (7 lines -> 6), the reset of
// estimated_delay was removed: the value is produced by rr_simulation()
// and must survive a cleared request so it can still be reported to the
// scheduler (the request-writing code now sends it only when
// req_secs is nonzero).
void RSC_WORK_FETCH::clear_request() {
    req_secs = 0;
    req_instances = 0;
}
void WORK_FETCH::clear_request() {
@ -672,7 +681,7 @@ void WORK_FETCH::write_request(FILE* f) {
cpu_work_fetch.req_secs,
cpu_work_fetch.req_secs,
cpu_work_fetch.req_instances,
cpu_work_fetch.estimated_delay
cpu_work_fetch.req_secs?cpu_work_fetch.estimated_delay:0
);
}