diff --git a/checkin_notes b/checkin_notes index 0ff4c0d4cb..3c44205b98 100644 --- a/checkin_notes +++ b/checkin_notes @@ -5632,3 +5632,18 @@ David 20 Aug 2012 vda_lib2.cpp client/ app_control.cpp + +David 20 Aug 2012 + - client: take GPU exclusions into account when making + initial work request to a project + - client: put some casts to double in NVIDIA detect code. + Shouldn't make any difference. + - volunteer storage: truncate file to right size after retrieval + + client/ + work_fetch.cpp,h + gpu_nvidia.cpp + scheduler_op.cpp + vda/ + vdad.cpp + vda_lib2.cpp diff --git a/client/gpu_nvidia.cpp b/client/gpu_nvidia.cpp index 1ce7ec25a4..bd70caf364 100644 --- a/client/gpu_nvidia.cpp +++ b/client/gpu_nvidia.cpp @@ -304,11 +304,11 @@ void COPROC_NVIDIA::get( (*__cuDeviceTotalMem)(&global_mem, device); cc.prop.totalGlobalMem = (double) global_mem; (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK, device); - cc.prop.sharedMemPerBlock = itemp; + cc.prop.sharedMemPerBlock = (double) itemp; (*__cuDeviceGetAttribute)(&cc.prop.regsPerBlock, CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK, device); (*__cuDeviceGetAttribute)(&cc.prop.warpSize, CU_DEVICE_ATTRIBUTE_WARP_SIZE, device); (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_MAX_PITCH, device); - cc.prop.memPitch = itemp; + cc.prop.memPitch = (double) itemp; retval = (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsPerBlock, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, device); retval = (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsDim[0], CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X, device); (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsDim[1], CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y, device); @@ -318,9 +318,9 @@ void COPROC_NVIDIA::get( (*__cuDeviceGetAttribute)(&cc.prop.maxGridSize[2], CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z, device); (*__cuDeviceGetAttribute)(&cc.prop.clockRate, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device); (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, device); - 
cc.prop.totalConstMem = itemp; + cc.prop.totalConstMem = (double) itemp; (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT, device); - cc.prop.textureAlignment = itemp; + cc.prop.textureAlignment = (double) itemp; (*__cuDeviceGetAttribute)(&cc.prop.deviceOverlap, CU_DEVICE_ATTRIBUTE_GPU_OVERLAP, device); (*__cuDeviceGetAttribute)(&cc.prop.multiProcessorCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device); (*__cuDeviceGetAttribute)(&cc.pci_info.bus_id, CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, device); diff --git a/client/scheduler_op.cpp b/client/scheduler_op.cpp index 4241702e5b..ee8c377c94 100644 --- a/client/scheduler_op.cpp +++ b/client/scheduler_op.cpp @@ -106,7 +106,7 @@ int SCHEDULER_OP::init_op_project(PROJECT* p, int r) { } if (reason == RPC_REASON_INIT) { - work_fetch.set_initial_work_request(); + work_fetch.set_initial_work_request(p); if (!gstate.cpu_benchmarks_done()) { gstate.cpu_benchmarks_set_defaults(); } diff --git a/client/work_fetch.cpp b/client/work_fetch.cpp index ef816e6e91..e7384dc590 100644 --- a/client/work_fetch.cpp +++ b/client/work_fetch.cpp @@ -872,9 +872,15 @@ void WORK_FETCH::handle_reply( // arrange to always get one job, even if we don't need it or can't handle it. 
// (this is probably what user wants) // -void WORK_FETCH::set_initial_work_request() { +void WORK_FETCH::set_initial_work_request(PROJECT* p) { for (int i=0; incoprocs_excluded[i]) { + rsc_work_fetch[i].req_secs = 0; + } + } rsc_work_fetch[i].req_instances = 0; rsc_work_fetch[i].busy_time_estimator.reset(); } diff --git a/client/work_fetch.h b/client/work_fetch.h index 1633882671..975d14503c 100644 --- a/client/work_fetch.h +++ b/client/work_fetch.h @@ -293,7 +293,7 @@ struct WORK_FETCH { void handle_reply( PROJECT*, SCHEDULER_REPLY*, std::vectornew_results ); - void set_initial_work_request(); + void set_initial_work_request(PROJECT*); void set_all_requests(PROJECT*); void set_all_requests_hyst(PROJECT*, int rsc_type); void print_state(); diff --git a/vda/vda_lib2.cpp b/vda/vda_lib2.cpp index b54585ff6d..de2a7e500c 100644 --- a/vda/vda_lib2.cpp +++ b/vda/vda_lib2.cpp @@ -591,16 +591,16 @@ int VDA_FILE_AUX::choose_host() { // if (!available_hosts.size()) { int nhosts_scanned = 0; - int rand_id; + int max_id, rand_id; for (int i=0; i<2; i++) { char buf[256]; if (i == 0) { - retval = host.max_id(rand_id, ""); + retval = host.max_id(max_id, ""); if (retval) { log_messages.printf(MSG_CRITICAL, "host.max_id() failed\n"); return 0; } - rand_id = (int)(((double)id)*drand()); + rand_id = (int)(((double)max_id)*drand()); sprintf(buf, "where %s and id>=%d order by id limit 100", host_alive_clause(), rand_id diff --git a/vda/vdad.cpp b/vda/vdad.cpp index 14d5ba8bb9..ccf410db4f 100644 --- a/vda/vdad.cpp +++ b/vda/vdad.cpp @@ -88,7 +88,27 @@ int handle_file(VDA_FILE_AUX& vf, DB_VDA_FILE& dvf) { case PRESENT: // we have enough chunks to reconstruct it - do so // - vf.meta_chunk->reconstruct(); + retval = vf.meta_chunk->reconstruct(); + if (retval) { + log_messages.printf(MSG_CRITICAL, + "reconstruct of %s failed: %d\n", vf.file_name, retval + ); + } else { + log_messages.printf(MSG_NORMAL, + "retrieval of %s completed successfully\n", vf.file_name + ); + + // Decoding produces 
a file with unused space at the end. + // Remove this space. + // + sprintf(buf, "truncate %s/%s --reference %s/%s", + vf.dir, vf.file_name, vf.dir, vf.file_name + ); + system(buf); + + dvf.retrieved = true; + dvf.update(); + } break; case RECOVERABLE: // otherwise start all possible uploads