mirror of https://github.com/BOINC/boinc.git
*** empty log message ***
svn path=/trunk/boinc/; revision=6273
This commit is contained in:
parent 3828904a84
commit 5e8150b9ae
@@ -7229,3 +7229,22 @@ David 31 May 2005
         util.C
     sched/
         server_types.C
+
+David 31 May 2005
+    - core client: remove extraneous scheduling-related messages.
+        Just say when we're entering/leaving either
+        no-work-fetch mode or deadline-scheduling mode
+
+    - got rid of work_request arg to
+        CLIENT_STATE::make_scheduler_request() and
+        SCHEDULER_OP::init_op_project()
+
+    - When making a scheduler RPC, always show
+        the work request and the # of results being returned
+
+    client/
+        client_state.h
+        client_types.h
+        cs_apps.C
+        cs_scheduler.C
+        scheduler_op.C,h
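The second bullet above is a plumbing simplification: instead of threading a work_req argument from SCHEDULER_OP::init_op_project() down into CLIENT_STATE::make_scheduler_request(), the requested amount now lives on the PROJECT (its work_request field) and each layer reads it from there. Below is a minimal sketch of that shape, using stand-in types and free functions rather than the real BOINC classes; only the work_request field and the call chain are taken from the diffs that follow.

#include <cstdio>

// Stand-in for the real BOINC PROJECT class; only the field this change
// cares about is modeled here.
struct PROJECT {
    const char* name;
    double work_request;   // seconds of work to ask this project for, set by the work-fetch policy
};

// New shape: the request amount is read from the project itself...
int make_scheduler_request(PROJECT* p) {
    std::printf("writing scheduler request for %s: %.2f seconds of work\n",
                p->name, p->work_request);
    return 0;
}

// ...so init_op_project() no longer needs a 'double ns' parameter either.
int init_op_project(PROJECT* project) {
    return make_scheduler_request(project);
}

int main() {
    PROJECT p{"example_project", 7200.0};
    return init_op_project(&p);
}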
@@ -276,7 +276,7 @@ public:
     double work_needed_secs();
     PROJECT* next_project_master_pending();
     PROJECT* next_project_need_work(PROJECT* old, int urgency);
-    int make_scheduler_request(PROJECT*, double);
+    int make_scheduler_request(PROJECT*);
     int handle_scheduler_reply(PROJECT*, char* scheduler_url, int& nresults);
     int compute_work_requests();
 private:
@@ -244,6 +244,8 @@ public:
         // 2) on_frac and active_frac
         // see doc/work_req.php
     int work_request_urgency;
+    int nresults_returned;
+        // # of results being returned in current scheduler op
 
 #if 0
     // used in disk-space management (temp)
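The nresults_returned member added here is a per-RPC counter: it is zeroed while the scheduler request file is being written, incremented for each result that belongs to the project and is ready to report, and then echoed alongside work_request in the new "Requesting ... returning N results" message (see the cs_scheduler.C and scheduler_op.C hunks further down). A small self-contained sketch of that counting pattern, with simplified stand-in types:

#include <cstdio>
#include <vector>

// Stand-ins for the real BOINC types; only the fields used here are modeled.
struct PROJECT {
    const char* name;
    double work_request;
    int nresults_returned;   // # of results being returned in current scheduler op
};

struct RESULT {
    PROJECT* project;
    bool ready_to_report;
};

// Count the results that will be reported to this project in the current RPC.
// (The real make_scheduler_request() also serializes each such result into the request file.)
void count_results_to_return(PROJECT* p, const std::vector<RESULT*>& results) {
    p->nresults_returned = 0;
    for (RESULT* rp : results) {
        if (rp->project == p && rp->ready_to_report) {
            p->nresults_returned++;
        }
    }
}

int main() {
    PROJECT p{"example_project", 3600.0, 0};
    RESULT done{&p, true}, running{&p, false};
    std::vector<RESULT*> results{&done, &running};
    count_results_to_return(&p, results);
    // Mirrors the message added in SCHEDULER_OP::start_rpc() below.
    std::printf("Requesting %.0f seconds of work, returning %d results\n",
                p.work_request, p.nresults_returned);
    return 0;
}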
@@ -376,7 +376,7 @@ bool CLIENT_STATE::schedule_earliest_deadline_result(double expected_pay_off) {
     }
     if (!best_result) return false;
 
-    msg_printf(0, MSG_INFO, "earliest deadline: %f %s", earliest_deadline, best_result->name);
+    // msg_printf(0, MSG_INFO, "earliest deadline: %f %s", earliest_deadline, best_result->name);
     schedule_result(best_result);
     best_result->already_selected = true;
     best_project->anticipated_debt -= expected_pay_off;
@@ -483,12 +483,12 @@ bool CLIENT_STATE::schedule_cpus(double now) {
     elapsed_time = now - cpu_sched_last_time;
     if (must_schedule_cpus) {
         must_schedule_cpus = false;
-        msg_printf(0, MSG_INFO, "schedule_cpus: must schedule");
+        // msg_printf(0, MSG_INFO, "schedule_cpus: must schedule");
     } else {
         if (elapsed_time < (global_prefs.cpu_scheduling_period_minutes*60)) {
             return false;
         }
-        msg_printf(0, MSG_INFO, "schedule_cpus: time %f", elapsed_time);
+        // msg_printf(0, MSG_INFO, "schedule_cpus: time %f", elapsed_time);
     }
     cpu_sched_last_time = now;
 
@@ -197,7 +197,7 @@ PROJECT* CLIENT_STATE::next_project_need_work(PROJECT* old, int urgency) {
 // Write a scheduler request to a disk file
 // (later sent to the scheduling server)
 //
-int CLIENT_STATE::make_scheduler_request(PROJECT* p, double work_req) {
+int CLIENT_STATE::make_scheduler_request(PROJECT* p) {
     char buf[1024];
 
     get_sched_request_filename(*p, buf);
@@ -231,7 +231,7 @@ int CLIENT_STATE::make_scheduler_request(PROJECT* p, double work_req) {
         p->anonymous_platform?"anonymous":platform_name,
         core_client_major_version,
         core_client_minor_version,
-        work_req,
+        p->work_request,
         p->resource_share / trs,
         ettprc(p, proj_min_results(p, ncpus)-1)
     );
@@ -301,9 +301,11 @@ int CLIENT_STATE::make_scheduler_request(PROJECT* p, double work_req) {
     if (retval) return retval;
     retval = host_info.write(mf);
     if (retval) return retval;
+    p->nresults_returned = 0;
     for (i=0; i<results.size(); i++) {
         rp = results[i];
         if (rp->project == p && rp->ready_to_report) {
+            p->nresults_returned++;
             rp->write(mf, true);
         }
     }
@@ -1039,32 +1041,38 @@ void CLIENT_STATE::set_cpu_scheduler_modes() {
         if (booked_to[lowest_booked_cpu] - now > (rp->report_deadline - now) * MAX_CPU_LOAD_FACTOR * up_frac) {
             should_not_fetch_work = true;
             use_earliest_deadline_first = true;
+#if 0
             if (!cpu_earliest_deadline_first || !work_fetch_no_new_work) {
                 msg_printf(NULL, MSG_INFO,
                     "Computer is overcommitted"
                 );
             }
+#endif
         }
         // Is the nearest deadline within a day?
         //
         if (rp->report_deadline - now < 60 * 60 * 24) {
             use_earliest_deadline_first = true;
+#if 0
             if (!cpu_earliest_deadline_first) {
                 msg_printf(NULL, MSG_INFO,
                     "Less than 1 day until deadline."
                 );
             }
+#endif
         }
 
         // is there a deadline < twice the users connect period?
         //
         if (rp->report_deadline - now < global_prefs.work_buf_min_days * SECONDS_PER_DAY * 2) {
             use_earliest_deadline_first = true;
+#if 0
             if (!cpu_earliest_deadline_first) {
                 msg_printf(NULL, MSG_INFO,
                     "Deadline is before reconnect time"
                 );
             }
+#endif
         }
 
         frac_booked += rp->estimated_cpu_time_remaining() / (rp->report_deadline - now);
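The hunk above applies three per-result tests: if the least-loaded CPU is already booked for longer than the result's deadline allows, the host is overcommitted (suspend work fetch and switch to earliest-deadline-first); a deadline within a day, or within twice the user's connect period, also switches to earliest-deadline-first. The following standalone sketch restates just those threshold checks; the helper signature and the MAX_CPU_LOAD_FACTOR value are illustrative, not BOINC's actual definitions.

#include <cstdio>

const double SECONDS_PER_DAY = 86400;
// Illustrative value; the real MAX_CPU_LOAD_FACTOR is defined elsewhere in the client.
const double MAX_CPU_LOAD_FACTOR = 0.8;

// Apply the three deadline-pressure tests from set_cpu_scheduler_modes() to one result.
// 'booked_seconds' is how long the least-loaded CPU is already committed for,
// 'up_frac' is the fraction of time the host is available, times are Unix seconds.
void check_deadline_pressure(
    double report_deadline, double now, double booked_seconds, double up_frac,
    double work_buf_min_days,
    bool& should_not_fetch_work, bool& use_earliest_deadline_first
) {
    // 1) CPU is booked past what this deadline can absorb: overcommitted.
    if (booked_seconds > (report_deadline - now) * MAX_CPU_LOAD_FACTOR * up_frac) {
        should_not_fetch_work = true;
        use_earliest_deadline_first = true;
    }
    // 2) Deadline within a day.
    if (report_deadline - now < 60 * 60 * 24) {
        use_earliest_deadline_first = true;
    }
    // 3) Deadline within twice the user's connect period.
    if (report_deadline - now < work_buf_min_days * SECONDS_PER_DAY * 2) {
        use_earliest_deadline_first = true;
    }
}

int main() {
    bool no_fetch = false, edf = false;
    // A result due in 10 hours on a CPU already booked for 12 hours of work.
    check_deadline_pressure(1e6 + 10*3600, 1e6, 12*3600, 0.9, 0.1, no_fetch, edf);
    std::printf("suspend work fetch: %d, earliest-deadline-first: %d\n", no_fetch, edf);
    return 0;
}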
@@ -1072,35 +1080,37 @@ void CLIENT_STATE::set_cpu_scheduler_modes() {
 
     if (frac_booked > MAX_CPU_LOAD_FACTOR * up_frac * ncpus) {
         should_not_fetch_work = true;
+#if 0
         if (!work_fetch_no_new_work) {
             msg_printf(NULL, MSG_INFO,
                 "Nearly overcommitted."
             );
         }
+#endif
     }
 
     // display only when the policy changes to avoid once per second
     //
     if (work_fetch_no_new_work && !should_not_fetch_work) {
         msg_printf(NULL, MSG_INFO,
-            "New work fetch policy: work fetch allowed."
+            "Allowing work fetch again."
         );
     }
 
     if (!work_fetch_no_new_work && should_not_fetch_work) {
         msg_printf(NULL, MSG_INFO,
-            "New work fetch policy: no work fetch allowed."
+            "Suspending work fetch because computer is overcommitted."
        );
     }
 
     if (cpu_earliest_deadline_first && !use_earliest_deadline_first) {
         msg_printf(NULL, MSG_INFO,
-            "New CPU scheduler policy: highest debt first."
+            "Resuming round-robin CPU scheduling."
         );
     }
     if (!cpu_earliest_deadline_first && use_earliest_deadline_first) {
         msg_printf(NULL, MSG_INFO,
-            "New CPU scheduler policy: earliest deadline first."
+            "Using earliest-deadline-first scheduling because computer is overcommitted."
         );
     }
 
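The "display only when the policy changes" comment is the core of the message cleanup: the per-check messages are compiled out with #if 0, and a message is printed only when one of the two flags actually changes, so the log notes entering or leaving no-work-fetch mode or deadline-scheduling mode once rather than every second. A minimal sketch of that edge-triggered pattern follows; the assignment of the new flag values at the end is assumed, since it is not part of the hunk shown.

#include <cstdio>

// Persistent policy flags (members of CLIENT_STATE in the real client).
static bool work_fetch_no_new_work = false;
static bool cpu_earliest_deadline_first = false;

// Print a message only when a policy flag actually changes state, then record the new state.
void apply_policy(bool should_not_fetch_work, bool use_earliest_deadline_first) {
    if (work_fetch_no_new_work && !should_not_fetch_work) {
        std::puts("Allowing work fetch again.");
    }
    if (!work_fetch_no_new_work && should_not_fetch_work) {
        std::puts("Suspending work fetch because computer is overcommitted.");
    }
    if (cpu_earliest_deadline_first && !use_earliest_deadline_first) {
        std::puts("Resuming round-robin CPU scheduling.");
    }
    if (!cpu_earliest_deadline_first && use_earliest_deadline_first) {
        std::puts("Using earliest-deadline-first scheduling because computer is overcommitted.");
    }
    // Assumed step: remember the new state so the next call only reports changes.
    work_fetch_no_new_work = should_not_fetch_work;
    cpu_earliest_deadline_first = use_earliest_deadline_first;
}

int main() {
    apply_policy(true, true);    // prints both "entering" messages
    apply_policy(true, true);    // no output: nothing changed
    apply_policy(false, false);  // prints both "leaving" messages
    return 0;
}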
@@ -83,14 +83,11 @@ bool SCHEDULER_OP::check_master_fetch_start() {
 int SCHEDULER_OP::init_get_work(int urgency) {
     int retval;
     char err_msg[256];
-    double ns;
 
     must_get_work = true;
     project = gstate.next_project_need_work(0, urgency);
     if (project) {
-        ns = project->work_request;
-        msg_printf(project, MSG_INFO, "Requesting %.2f seconds of work", ns);
-        retval = init_op_project(ns);
+        retval = init_op_project();
         if (retval) {
             sprintf(err_msg, "init_op_project failed, error %d\n", retval);
             backoff(project, err_msg);
@@ -107,13 +104,13 @@ int SCHEDULER_OP::init_get_work(int urgency) {
 int SCHEDULER_OP::init_return_results(PROJECT* p) {
     must_get_work = false;
     project = p;
-    return init_op_project(p->work_request);
+    return init_op_project();
 }
 
 // try to initiate an RPC to the current project.
 // If there are multiple schedulers, start with the first one
 //
-int SCHEDULER_OP::init_op_project(double ns) {
+int SCHEDULER_OP::init_op_project() {
     int retval;
     char err_msg[256];
 
@@ -129,7 +126,7 @@ int SCHEDULER_OP::init_op_project(double ns) {
         retval = init_master_fetch();
         goto done;
     }
-    retval = gstate.make_scheduler_request(project, ns);
+    retval = gstate.make_scheduler_request(project);
     if (retval) {
         msg_printf(project, MSG_ERROR, "make_scheduler_request: %d\n", retval);
         goto done;
@@ -224,6 +221,11 @@ int SCHEDULER_OP::start_rpc() {
             project, MSG_INFO,
             "Sending scheduler request to %s\n", scheduler_url
         );
+        msg_printf(
+            project, MSG_INFO,
+            "Requesting %.0f seconds of work, returning %d results\n",
+            project->work_request, project->nresults_returned
+        );
     }
 
     get_sched_request_filename(*project, request_file);
@@ -448,7 +450,7 @@ bool SCHEDULER_OP::poll() {
             if (urgency != WORK_FETCH_DONT_NEED) {
                 project = gstate.next_project_need_work(project, urgency);
                 if (project) {
-                    retval = init_op_project(project->work_request);
+                    retval = init_op_project();
                 } else {
                     scheduler_op_done = true;
                 }
@@ -514,7 +516,7 @@ bool SCHEDULER_OP::poll() {
             if (urgency != WORK_FETCH_DONT_NEED) {
                 project = gstate.next_project_need_work(project, urgency);
                 if (project) {
-                    retval = init_op_project(project->work_request);
+                    retval = init_op_project();
                 } else {
                     scheduler_op_done = true;
                 }
@@ -75,7 +75,7 @@ struct SCHEDULER_OP {
     bool poll();
     int init_get_work(int urgency);
    int init_return_results(PROJECT*);
-    int init_op_project(double ns);
+    int init_op_project();
     int init_master_fetch();
     int set_min_rpc_time(PROJECT*);
     bool update_urls(std::vector<std::string> &urls);