mirror of https://github.com/BOINC/boinc.git
client: set work requests for coprocs specified in cc_config.xml

We weren't copying the request fields from RSC_WORK_FETCH to COPROC, so no work was ever requested for coprocessors defined only in cc_config.xml. Do this, and clean up the code a bit. Note: the client arrays that parallel the COPROCS::coprocs array are a bit of a kludge; that state logically belongs in COPROC, but it's specific to the client, so I can't put it there. Maybe I could do something fancy with derived classes; not sure (a speculative sketch of that idea follows below).
commit 31541e166d
parent 9f1625a0a7
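The "something fancy with derived classes" mentioned above might look roughly like the sketch below. This is purely speculative and not part of this commit; CLIENT_COPROC and CLIENT_COPROCS are hypothetical names invented for illustration, and COPROC here is a minimal stand-in for the shared struct so the sketch is self-contained.

// Speculative sketch only (not BOINC code): keep client-specific work-fetch
// state in a client-side derived class instead of arrays that parallel
// COPROCS::coprocs. CLIENT_COPROC / CLIENT_COPROCS are hypothetical names.

#include <cstring>
#include <vector>

struct COPROC {                      // minimal stand-in for the shared struct
    char type[256];
    int count;
};

struct CLIENT_COPROC : public COPROC {
    // per-resource state that currently lives in client-side parallel arrays
    double req_secs;                 // seconds of work to request
    double req_instances;            // instances of work to request
    double estimated_delay;          // estimated time until an instance is free
    CLIENT_COPROC() : req_secs(0), req_instances(0), estimated_delay(0) {
        strcpy(type, "");
        count = 0;
    }
};

struct CLIENT_COPROCS {
    std::vector<CLIENT_COPROC> coprocs;   // one entry per resource type
};

The drawback, as the commit message notes, is that COPROC lives in library code shared with non-client components, so client-only fields can't simply be moved into it.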
@@ -227,28 +227,8 @@ int CLIENT_STATE::make_scheduler_request(PROJECT* p) {
         total_disk_usage, p->disk_usage, p->disk_share
     );
 
-    // copy request values from RSC_WORK_FETCH to COPROC
-    //
-    int j = rsc_index(GPU_TYPE_NVIDIA);
-    if (j > 0) {
-        coprocs.nvidia.req_secs = rsc_work_fetch[j].req_secs;
-        coprocs.nvidia.req_instances = rsc_work_fetch[j].req_instances;
-        coprocs.nvidia.estimated_delay = rsc_work_fetch[j].req_secs?rsc_work_fetch[j].busy_time_estimator.get_busy_time():0;
-    }
-    j = rsc_index(GPU_TYPE_ATI);
-    if (j > 0) {
-        coprocs.ati.req_secs = rsc_work_fetch[j].req_secs;
-        coprocs.ati.req_instances = rsc_work_fetch[j].req_instances;
-        coprocs.ati.estimated_delay = rsc_work_fetch[j].req_secs?rsc_work_fetch[j].busy_time_estimator.get_busy_time():0;
-    }
-    j = rsc_index(GPU_TYPE_INTEL);
-    if (j > 0) {
-        coprocs.intel_gpu.req_secs = rsc_work_fetch[j].req_secs;
-        coprocs.intel_gpu.req_instances = rsc_work_fetch[j].req_instances;
-        coprocs.intel_gpu.estimated_delay = rsc_work_fetch[j].req_secs?rsc_work_fetch[j].busy_time_estimator.get_busy_time():0;
-    }
-
     if (coprocs.n_rsc > 1) {
+        work_fetch.copy_requests();
         coprocs.write_xml(mf, true);
     }
@@ -303,7 +283,7 @@ int CLIENT_STATE::make_scheduler_request(PROJECT* p) {
     // send descriptions of app versions
     //
     fprintf(f, "<app_versions>\n");
-    j=0;
+    int j=0;
     for (i=0; i<app_versions.size(); i++) {
         APP_VERSION* avp = app_versions[i];
         if (avp->project != p) continue;
@@ -148,6 +148,12 @@ void RSC_PROJECT_WORK_FETCH::resource_backoff(PROJECT* p, const char* name) {
 
 /////////////// RSC_WORK_FETCH ///////////////
 
+void RSC_WORK_FETCH::copy_request(COPROC& c) {
+    c.req_secs = req_secs;
+    c.req_instances = req_instances;
+    c.estimated_delay = req_secs?busy_time_estimator.get_busy_time():0;
+}
+
 RSC_PROJECT_WORK_FETCH& RSC_WORK_FETCH::project_state(PROJECT* p) {
     return p->rsc_pwf[rsc_type];
 }
@@ -423,6 +429,27 @@ void WORK_FETCH::set_all_requests(PROJECT* p) {
 }
 #endif
 
+// copy request fields from RSC_WORK_FETCH to COPROCS
+//
+void WORK_FETCH::copy_requests() {
+    for (int i=0; i<coprocs.n_rsc; i++) {
+        switch (coproc_type_name_to_num(coprocs.coprocs[i].type)) {
+        case PROC_TYPE_NVIDIA_GPU:
+            rsc_work_fetch[i].copy_request(coprocs.nvidia);
+            break;
+        case PROC_TYPE_AMD_GPU:
+            rsc_work_fetch[i].copy_request(coprocs.ati);
+            break;
+        case PROC_TYPE_INTEL_GPU:
+            rsc_work_fetch[i].copy_request(coprocs.intel_gpu);
+            break;
+        default:
+            rsc_work_fetch[i].copy_request(coprocs.coprocs[i]);
+            break;
+        }
+    }
+}
+
 void WORK_FETCH::print_state() {
     msg_printf(0, MSG_INFO, "[work_fetch] ------- start work fetch state -------");
     msg_printf(0, MSG_INFO, "[work_fetch] target work buffer: %.2f + %.2f sec",
@@ -240,6 +240,7 @@ struct RSC_WORK_FETCH {
     void print_state(const char*);
     void clear_request();
     void set_request(PROJECT*);
+    void copy_request(COPROC&);
     void set_request_excluded(PROJECT*);
     bool may_have_work(PROJECT*);
     int cant_fetch(PROJECT*);
@@ -311,6 +312,7 @@ struct WORK_FETCH {
     void clear_backoffs(APP_VERSION&);
     void request_string(char*);
     bool requested_work();
+    void copy_requests();
 };
 
 extern RSC_WORK_FETCH rsc_work_fetch[MAX_RSC];
@@ -251,22 +251,21 @@ int COPROCS::parse(XML_PARSER& xp) {
 void COPROCS::write_xml(MIOFILE& mf, bool scheduler_rpc) {
 #ifndef _USING_FCGI_
     mf.printf("    <coprocs>\n");
-    if (nvidia.count) {
-        nvidia.write_xml(mf, scheduler_rpc);
-    }
-    if (ati.count) {
-        ati.write_xml(mf, scheduler_rpc);
-    }
-    if (intel_gpu.count) {
-        intel_gpu.write_xml(mf, scheduler_rpc);
-    }
 
     for (int i=1; i<n_rsc; i++) {
-        if (!strcmp("CUDA", coprocs[i].type)) continue;
-        if (!strcmp(GPU_TYPE_NVIDIA, coprocs[i].type)) continue;
-        if (!strcmp(GPU_TYPE_ATI, coprocs[i].type)) continue;
-        if (!strcmp(GPU_TYPE_INTEL, coprocs[i].type)) continue;
-        coprocs[i].write_xml(mf, scheduler_rpc);
+        switch (coproc_type_name_to_num(coprocs[i].type)) {
+        case PROC_TYPE_NVIDIA_GPU:
+            nvidia.write_xml(mf, scheduler_rpc);
+            break;
+        case PROC_TYPE_AMD_GPU:
+            ati.write_xml(mf, scheduler_rpc);
+            break;
+        case PROC_TYPE_INTEL_GPU:
+            intel_gpu.write_xml(mf, scheduler_rpc);
+            break;
+        default:
+            coprocs[i].write_xml(mf, scheduler_rpc);
+        }
     }
 
     mf.printf("    </coprocs>\n");
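Both the new WORK_FETCH::copy_requests() and the reworked COPROCS::write_xml() use the same dispatch pattern: convert the coproc's type string to a numeric type with coproc_type_name_to_num() and switch on it, instead of chaining strcmp() calls. The standalone sketch below isolates that pattern; the PROC_TYPE_* values and the helper are re-declared locally in simplified form just to keep it self-contained, and "custom_dev" is a made-up type name standing in for a coproc declared only in cc_config.xml.

// Self-contained sketch of the name-to-enum dispatch pattern used in this
// commit. The enum values and helper below are local, simplified stand-ins
// for the real PROC_TYPE_* constants and coproc_type_name_to_num().

#include <cstdio>
#include <cstring>

enum PROC_TYPE {
    PROC_TYPE_NVIDIA_GPU,
    PROC_TYPE_AMD_GPU,
    PROC_TYPE_INTEL_GPU,
    PROC_TYPE_UNKNOWN
};

static PROC_TYPE coproc_type_name_to_num(const char* name) {
    if (!strcmp(name, "NVIDIA")) return PROC_TYPE_NVIDIA_GPU;
    if (!strcmp(name, "ATI")) return PROC_TYPE_AMD_GPU;
    if (!strcmp(name, "intel_gpu")) return PROC_TYPE_INTEL_GPU;
    return PROC_TYPE_UNKNOWN;        // e.g. a coproc defined only in cc_config.xml
}

int main() {
    const char* types[] = {"NVIDIA", "ATI", "intel_gpu", "custom_dev"};
    for (const char* t : types) {
        switch (coproc_type_name_to_num(t)) {
        case PROC_TYPE_NVIDIA_GPU: printf("%s: vendor-specific NVIDIA path\n", t); break;
        case PROC_TYPE_AMD_GPU:    printf("%s: vendor-specific ATI path\n", t); break;
        case PROC_TYPE_INTEL_GPU:  printf("%s: vendor-specific Intel path\n", t); break;
        default:                   printf("%s: generic coproc path\n", t); break;
        }
    }
    return 0;
}

Centralizing the dispatch this way means a coproc with an unrecognized type, such as one added through cc_config.xml, still falls through to the generic path instead of being skipped.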