mirror of https://github.com/BOINC/boinc.git
- client: take GPU exclusions into account when making
  initial work request to a project
- client: put some casts to double in NVIDIA detect code.
  Shouldn't make any difference.
- volunteer storage: truncate file to right size after retrieval

svn path=/trunk/boinc/; revision=26051
commit 446bc4ca28, parent 52068b5f2d
checkin_notes
@@ -5632,3 +5632,18 @@ David 20 Aug 2012
         vda_lib2.cpp
     client/
         app_control.cpp
+
+David 20 Aug 2012
+    - client: take GPU exclusions into account when making
+        initial work request to a project
+    - client: put some casts to double in NVIDIA detect code.
+        Shouldn't make any difference.
+    - volunteer storage: truncate file to right size after retrieval
+
+    client/
+        work_fetch.cpp,h
+        gpu_nvidia.cpp
+        scheduler_op.cpp
+    vda/
+        vdad.cpp
+        vda_lib2.dpp
client/gpu_nvidia.cpp
@@ -304,11 +304,11 @@ void COPROC_NVIDIA::get(
         (*__cuDeviceTotalMem)(&global_mem, device);
         cc.prop.totalGlobalMem = (double) global_mem;
         (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK, device);
-        cc.prop.sharedMemPerBlock = itemp;
+        cc.prop.sharedMemPerBlock = (double) itemp;
         (*__cuDeviceGetAttribute)(&cc.prop.regsPerBlock, CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK, device);
         (*__cuDeviceGetAttribute)(&cc.prop.warpSize, CU_DEVICE_ATTRIBUTE_WARP_SIZE, device);
         (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_MAX_PITCH, device);
-        cc.prop.memPitch = itemp;
+        cc.prop.memPitch = (double) itemp;
         retval = (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsPerBlock, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, device);
         retval = (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsDim[0], CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X, device);
         (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsDim[1], CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y, device);
@@ -318,9 +318,9 @@ void COPROC_NVIDIA::get(
         (*__cuDeviceGetAttribute)(&cc.prop.maxGridSize[2], CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z, device);
         (*__cuDeviceGetAttribute)(&cc.prop.clockRate, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device);
         (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, device);
-        cc.prop.totalConstMem = itemp;
+        cc.prop.totalConstMem = (double) itemp;
         (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT, device);
-        cc.prop.textureAlignment = itemp;
+        cc.prop.textureAlignment = (double) itemp;
         (*__cuDeviceGetAttribute)(&cc.prop.deviceOverlap, CU_DEVICE_ATTRIBUTE_GPU_OVERLAP, device);
         (*__cuDeviceGetAttribute)(&cc.prop.multiProcessorCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device);
         (*__cuDeviceGetAttribute)(&cc.pci_info.bus_id, CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, device);
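
A minimal standalone sketch (not BOINC code) of the int-to-double pattern the casts above make explicit: cuDeviceGetAttribute() only writes int values, while the coproc property fields are doubles, so the value is read into an int temporary and converted. The sketch calls the CUDA driver API directly; BOINC reaches the same entry points through dlopen'd function pointers such as __cuDeviceGetAttribute.

// sketch only: read one int attribute and store it as a double
#include <cstdio>
#include <cuda.h>

int main() {
    CUdevice device;
    if (cuInit(0) != CUDA_SUCCESS || cuDeviceGet(&device, 0) != CUDA_SUCCESS) {
        return 1;
    }
    int itemp = 0;
    cuDeviceGetAttribute(&itemp, CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, device);
    double totalConstMem = (double) itemp;   // explicit cast, as in the hunk above
    printf("total constant memory: %.0f bytes\n", totalConstMem);
    return 0;
}
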
client/scheduler_op.cpp
@@ -106,7 +106,7 @@ int SCHEDULER_OP::init_op_project(PROJECT* p, int r) {
     }

     if (reason == RPC_REASON_INIT) {
-        work_fetch.set_initial_work_request();
+        work_fetch.set_initial_work_request(p);
         if (!gstate.cpu_benchmarks_done()) {
             gstate.cpu_benchmarks_set_defaults();
         }
client/work_fetch.cpp
@@ -872,9 +872,15 @@ void WORK_FETCH::handle_reply(
 // arrange to always get one job, even if we don't need it or can't handle it.
 // (this is probably what user wants)
 //
-void WORK_FETCH::set_initial_work_request() {
+void WORK_FETCH::set_initial_work_request(PROJECT* p) {
     for (int i=0; i<coprocs.n_rsc; i++) {
         rsc_work_fetch[i].req_secs = 1;
+        if (i) {
+            RSC_WORK_FETCH& rwf = rsc_work_fetch[i];
+            if (rwf.ninstances == p->ncoprocs_excluded[i]) {
+                rsc_work_fetch[i].req_secs = 0;
+            }
+        }
         rsc_work_fetch[i].req_instances = 0;
         rsc_work_fetch[i].busy_time_estimator.reset();
     }
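
For illustration, a standalone sketch of the check added above, with simplified stand-in types rather than the BOINC classes: on the initial RPC each resource asks for one second of work, except a GPU type whose every instance is excluded for this project, which asks for nothing. The field names ninstances and ncoprocs_excluded follow the diff; everything else here is a stand-in.

#include <cstdio>
#include <vector>

struct RscWorkFetch {
    int ninstances = 0;        // devices of this type on the host
    double req_secs = 0;       // seconds of work to request
    double req_instances = 0;
};

struct Project {
    std::vector<int> ncoprocs_excluded;  // per-resource exclusion counts
};

void set_initial_work_request(std::vector<RscWorkFetch>& rsc, const Project& p) {
    for (size_t i = 0; i < rsc.size(); i++) {
        rsc[i].req_secs = 1;
        // i == 0 is the CPU; GPU exclusions only apply to coprocessors
        if (i && rsc[i].ninstances == p.ncoprocs_excluded[i]) {
            rsc[i].req_secs = 0;
        }
        rsc[i].req_instances = 0;
    }
}

int main() {
    std::vector<RscWorkFetch> rsc(2);
    rsc[1].ninstances = 1;           // one GPU of this type...
    Project p;
    p.ncoprocs_excluded = {0, 1};    // ...and it is excluded for this project
    set_initial_work_request(rsc, p);
    printf("CPU req_secs=%.0f, GPU req_secs=%.0f\n", rsc[0].req_secs, rsc[1].req_secs);
    return 0;
}
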
client/work_fetch.h
@@ -293,7 +293,7 @@ struct WORK_FETCH {
     void handle_reply(
         PROJECT*, SCHEDULER_REPLY*, std::vector<RESULT*>new_results
     );
-    void set_initial_work_request();
+    void set_initial_work_request(PROJECT*);
     void set_all_requests(PROJECT*);
     void set_all_requests_hyst(PROJECT*, int rsc_type);
     void print_state();
vda/vda_lib2.cpp
@@ -591,16 +591,16 @@ int VDA_FILE_AUX::choose_host() {
     //
     if (!available_hosts.size()) {
        int nhosts_scanned = 0;
-        int rand_id;
+        int max_id, rand_id;
         for (int i=0; i<2; i++) {
             char buf[256];
             if (i == 0) {
-                retval = host.max_id(rand_id, "");
+                retval = host.max_id(max_id, "");
                 if (retval) {
                     log_messages.printf(MSG_CRITICAL, "host.max_id() failed\n");
                     return 0;
                 }
-                rand_id = (int)(((double)id)*drand());
+                rand_id = (int)(((double)max_id)*drand());
                 sprintf(buf,
                     "where %s and id>=%d order by id limit 100",
                     host_alive_clause(), rand_id
vda/vdad.cpp
@@ -88,7 +88,27 @@ int handle_file(VDA_FILE_AUX& vf, DB_VDA_FILE& dvf) {
     case PRESENT:
         // we have enough chunks to reconstruct it - do so
         //
-        vf.meta_chunk->reconstruct();
+        retval = vf.meta_chunk->reconstruct();
+        if (retval) {
+            log_messages.printf(MSG_CRITICAL,
+                "reconstruct of %s failed: %d\n", vf.file_name, retval
+            );
+        } else {
+            log_messages.printf(MSG_NORMAL,
+                "retrieval of %s completed successfully\n", vf.file_name
+            );
+
+            // Decoding produces a file with unused space at the end.
+            // Remove this space.
+            //
+            sprintf(buf, "truncate %s/%s --reference %s/%s",
+                vf.dir, vf.file_name, vf.dir, vf.file_name
+            );
+            system(buf);
+
+            dvf.retrieved = true;
+            dvf.update();
+        }
         break;
     case RECOVERABLE:
         // otherwise start all possible uploads
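
As a hedged alternative sketch (not the committed code, which shells out to the coreutils truncate command with a reference file): the same "trim the decoder's padding" step can be done in-process with truncate(2). The correct_size parameter is a hypothetical stand-in for the file's known original size.

// sketch only: shrink a decoded file back to its recorded size
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <sys/types.h>
#include <unistd.h>

int trim_decoded_file(const std::string& path, off_t correct_size) {
    if (truncate(path.c_str(), correct_size)) {
        fprintf(stderr, "truncate(%s) failed: %d\n", path.c_str(), errno);
        return errno;
    }
    return 0;
}

int main(int argc, char** argv) {
    if (argc != 3) {
        fprintf(stderr, "usage: %s <path> <size>\n", argv[0]);
        return 1;
    }
    return trim_decoded_file(argv[1], (off_t) atoll(argv[2]));
}
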