From 68f9880615cb341bea4cb996afeaf1d381ac4416 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 5 Jul 2012 20:24:17 +0000 Subject: [PATCH] - client: remove "device" entry from CUDA_DEVICE_PROP, and change types of mem-size fields from int to double. These fields are size_t in NVIDIA's version of this; however, cuDeviceGetAttribute() returns them as int, so I don't see where this makes any difference. - client: fix bug in handling of <no_rsc_apps> element. - scheduler: message tweaks. Note: [foo] means that the message is enabled by <debug_foo>. svn path=/trunk/boinc/; revision=25849 --- checkin_notes | 26 +++++++++++++++++ client/gpu_nvidia.cpp | 17 ++++++----- client/scheduler_op.cpp | 2 +- client/work_fetch.cpp | 1 - db/boinc_db.h | 37 ------------------------ db/boinc_db_types.h | 37 ++++++++++++++++++++++++ lib/coproc.cpp | 29 +++++++++---------- lib/coproc.h | 11 +++---- sched/plan_class_spec.cpp | 60 +++++++++++++++++++++------------------ sched/sched_types.cpp | 29 ++++++++++++------- vda/sched_vda.cpp | 47 +++++++++++++++++++++--------- 11 files changed, 178 insertions(+), 118 deletions(-) diff --git a/checkin_notes b/checkin_notes index ad5e0f1759..b9c932e288 100644 --- a/checkin_notes +++ b/checkin_notes @@ -4727,3 +4727,29 @@ Charlie 5 July 2012 mac_installer/ release_boinc.sh + +David 5 July 2012 + - client: remove "device" entry from CUDA_DEVICE_PROP, + and change types of mem-size fields from int to double. + These fields are size_t in NVIDIA's version of this; + however, cuDeviceGetAttribute() returns them as int, + so I don't see where this makes any difference. + - client: fix bug in handling of <no_rsc_apps> element. + - scheduler: message tweaks. + Note: [foo] means that the message is enabled by <debug_foo>. 
+ + + lib/ + coproc.cpp,h + client/ + gui_nvidia.cpp + work_fetch.cpp + scheduler_op.cpp + db/ + boinc_db_types.h + boind_db.h + vda/ + sched_vda.cpp + sched/ + plan_class_spec.cpp + sched_types.cpp diff --git a/client/gpu_nvidia.cpp b/client/gpu_nvidia.cpp index 15f5439024..1ce7ec25a4 100644 --- a/client/gpu_nvidia.cpp +++ b/client/gpu_nvidia.cpp @@ -280,21 +280,20 @@ void COPROC_NVIDIA::get( sprintf(buf, "NVIDIA library reports %d GPU%s", cuda_ndevs, (cuda_ndevs==1)?"":"s"); warnings.push_back(buf); - int j; + int j, itemp; unsigned int i; size_t global_mem; COPROC_NVIDIA cc; string s; for (j=0; janonymous_platform) { - handle_no_rsc_apps(buf, project, btemp); + handle_no_rsc_apps(buf, project, true); } continue; } else if (xp.parse_bool("verify_files_on_app_start", project->verify_files_on_app_start)) { diff --git a/client/work_fetch.cpp b/client/work_fetch.cpp index 6b64783922..2b839211ab 100644 --- a/client/work_fetch.cpp +++ b/client/work_fetch.cpp @@ -997,4 +997,3 @@ void CLIENT_STATE::generate_new_host_cpid() { } } } - diff --git a/db/boinc_db.h b/db/boinc_db.h index c52cd1d915..abe405e973 100644 --- a/db/boinc_db.h +++ b/db/boinc_db.h @@ -481,43 +481,6 @@ public: std::vector items; }; -struct VDA_FILE { - int id; - double create_time; - char dir[256]; - char name[256]; - double size; - bool need_update; - bool initialized; - bool retrieving; - void clear(); -}; - -struct VDA_CHUNK_HOST { - double create_time; - int vda_file_id; - int host_id; - char name[256]; // C1.C2.Cn - double size; - bool present_on_host; - bool transfer_in_progress; - bool transfer_wait; - double transfer_request_time; - // when vdad assigned this chunk to this host - double transfer_send_time; - // when transfer request was sent to host - - // the following not in DB - // - bool found; - - void clear(); - inline bool download_in_progress() { - return (transfer_in_progress && !present_on_host); - } - -}; - struct DB_VDA_FILE : public DB_BASE, public VDA_FILE { DB_VDA_FILE(DB_CONN* 
p=0); int get_id(); diff --git a/db/boinc_db_types.h b/db/boinc_db_types.h index e67b64b1f1..cf81ada8e1 100644 --- a/db/boinc_db_types.h +++ b/db/boinc_db_types.h @@ -675,4 +675,41 @@ struct HOST_APP_VERSION { bool daily_quota_exceeded; }; +struct VDA_FILE { + int id; + double create_time; + char dir[256]; + char name[256]; + double size; + bool need_update; + bool initialized; + bool retrieving; + void clear(); +}; + +struct VDA_CHUNK_HOST { + double create_time; + int vda_file_id; + int host_id; + char name[256]; // C1.C2.Cn + double size; + bool present_on_host; + bool transfer_in_progress; + bool transfer_wait; + double transfer_request_time; + // when vdad assigned this chunk to this host + double transfer_send_time; + // when transfer request was sent to host + + // the following not in DB + // + bool found; + + void clear(); + inline bool download_in_progress() { + return (transfer_in_progress && !present_on_host); + } + +}; + #endif diff --git a/lib/coproc.cpp b/lib/coproc.cpp index 10c65e24ad..dce9bbaf5a 100644 --- a/lib/coproc.cpp +++ b/lib/coproc.cpp @@ -425,36 +425,36 @@ void COPROC_NVIDIA::write_xml(MIOFILE& f, bool scheduler_rpc) { " %d\n" " %d\n" " %f\n" - " %u\n" + " %f\n" " %d\n" " %d\n" - " %u\n" + " %f\n" " %d\n" " %d %d %d\n" " %d %d %d\n" " %d\n" - " %u\n" + " %f\n" " %d\n" " %d\n" - " %u\n" + " %f\n" " %d\n" " %d\n", peak_flops, cuda_version, display_driver_version, prop.totalGlobalMem, - (unsigned int)prop.sharedMemPerBlock, + prop.sharedMemPerBlock, prop.regsPerBlock, prop.warpSize, - (unsigned int)prop.memPitch, + prop.memPitch, prop.maxThreadsPerBlock, prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2], prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2], prop.clockRate, - (unsigned int)prop.totalConstMem, + prop.totalConstMem, prop.major, prop.minor, - (unsigned int)prop.textureAlignment, + prop.textureAlignment, prop.deviceOverlap, prop.multiProcessorCount ); @@ -480,7 +480,6 @@ void COPROC_NVIDIA::clear() { 
cuda_version = 0; display_driver_version = 0; strcpy(prop.name, ""); - prop.deviceHandle = 0; prop.totalGlobalMem = 0; prop.sharedMemPerBlock = 0; prop.regsPerBlock = 0; @@ -529,12 +528,11 @@ int COPROC_NVIDIA::parse(XML_PARSER& xp) { if (xp.parse_int("cudaVersion", cuda_version)) continue; if (xp.parse_int("drvVersion", display_driver_version)) continue; if (xp.parse_str("name", prop.name, sizeof(prop.name))) continue; - if (xp.parse_int("deviceHandle", prop.deviceHandle)) continue; if (xp.parse_double("totalGlobalMem", prop.totalGlobalMem)) continue; - if (xp.parse_int("sharedMemPerBlock", (int&)prop.sharedMemPerBlock)) continue; + if (xp.parse_double("sharedMemPerBlock", prop.sharedMemPerBlock)) continue; if (xp.parse_int("regsPerBlock", prop.regsPerBlock)) continue; if (xp.parse_int("warpSize", prop.warpSize)) continue; - if (xp.parse_int("memPitch", (int&)prop.memPitch)) continue; + if (xp.parse_double("memPitch", prop.memPitch)) continue; if (xp.parse_int("maxThreadsPerBlock", prop.maxThreadsPerBlock)) continue; if (xp.parse_str("maxThreadsDim", buf2, sizeof(buf2))) { // can't use sscanf here (FCGI) @@ -567,10 +565,10 @@ int COPROC_NVIDIA::parse(XML_PARSER& xp) { continue; } if (xp.parse_int("clockRate", prop.clockRate)) continue; - if (xp.parse_int("totalConstMem", (int&)prop.totalConstMem)) continue; + if (xp.parse_double("totalConstMem", prop.totalConstMem)) continue; if (xp.parse_int("major", prop.major)) continue; if (xp.parse_int("minor", prop.minor)) continue; - if (xp.parse_int("textureAlignment", (int&)prop.textureAlignment)) continue; + if (xp.parse_double("textureAlignment", prop.textureAlignment)) continue; if (xp.parse_int("deviceOverlap", prop.deviceOverlap)) continue; if (xp.parse_int("multiProcessorCount", prop.multiProcessorCount)) continue; if (xp.match_tag("pci_info")) { @@ -892,9 +890,10 @@ void COPROC_ATI::fake(double ram, double avail_ram, int n) { const char* proc_type_name_xml(int pt) { switch(pt) { + case PROC_TYPE_CPU: return "CPU"; 
case PROC_TYPE_NVIDIA_GPU: return "CUDA"; case PROC_TYPE_AMD_GPU: return "ATI"; - case PROC_TYPE_INTEL_GPU: return "INTEL"; + case PROC_TYPE_INTEL_GPU: return "intel_gpu"; } return "unknown"; } diff --git a/lib/coproc.h b/lib/coproc.h index a56e2ffd0f..e186429b1b 100644 --- a/lib/coproc.h +++ b/lib/coproc.h @@ -288,24 +288,25 @@ struct COPROC { // struct CUDA_DEVICE_PROP { char name[256]; - int deviceHandle; double totalGlobalMem; - int sharedMemPerBlock; + double sharedMemPerBlock; int regsPerBlock; int warpSize; - int memPitch; + double memPitch; int maxThreadsPerBlock; int maxThreadsDim[3]; int maxGridSize[3]; int clockRate; - int totalConstMem; + double totalConstMem; int major; // compute capability int minor; - int textureAlignment; + double textureAlignment; int deviceOverlap; int multiProcessorCount; }; +typedef int CUdevice; + struct COPROC_NVIDIA : public COPROC { int cuda_version; // CUDA runtime version int display_driver_version; diff --git a/sched/plan_class_spec.cpp b/sched/plan_class_spec.cpp index b23d4ba60e..cf636ecf7c 100644 --- a/sched/plan_class_spec.cpp +++ b/sched/plan_class_spec.cpp @@ -72,7 +72,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (!strstr(buf, cpu_features[i].c_str())) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] CPU lacks feature '%s' (got '%s')\n", + "[version] plan_class_spec: CPU lacks feature '%s' (got '%s')\n", cpu_features[i].c_str(), sreq.host.p_features ); } @@ -86,7 +86,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (min_ncpus && g_wreq->effective_ncpus < min_ncpus) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] not enough CPUs: %d < %f\n", + "[version] plan_class_spec: not enough CPUs: %d < %f\n", g_wreq->effective_ncpus, min_ncpus ); } @@ -98,7 +98,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (have_os_regex && 
regexec(&(os_regex), sreq.host.os_version, 0, NULL, 0)) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] OS version '%s' didn't match regexp\n", + "[version] plan_class_spec: OS version '%s' didn't match regexp\n", sreq.host.os_version ); } @@ -125,7 +125,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (n != 3) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] can't parse vbox version\n" + "[version] plan_class_spec: can't parse vbox version\n" ); } return false; @@ -134,7 +134,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (min_vbox_version && v < min_vbox_version) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] vbox version too low: %d < %d\n", + "[version] plan_class_spec: vbox version too low: %d < %d\n", v, min_vbox_version ); } @@ -143,7 +143,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (max_vbox_version && v > max_vbox_version) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] vbox version too high: %d > %d\n", + "[version] plan_class_spec: vbox version too high: %d > %d\n", v, max_vbox_version ); } @@ -182,14 +182,14 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { bool p = parse_str(buf, tag, value, sizeof(value)); if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] parsed project prefs setting '%s' : %s\n", + "[version] plan_class_spec: parsed project prefs setting '%s' : %s\n", project_prefs_tag, p?"true":"false" ); } if (regexec(&(project_prefs_regex), value, 0, NULL, 0)) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] project prefs setting '%s' prevents using plan class.\n", + "[version] plan_class_spec: project prefs setting '%s' 
prevents using plan class.\n", project_prefs_tag ); } @@ -212,7 +212,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { bool p = parse_double(buf, tag, v); if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] parsed project prefs setting '%s' : %s : %f\n", + "[version] plan_class_spec: parsed project prefs setting '%s' : %s : %f\n", gpu_utilization_tag, p?"true":"false", v ); } @@ -230,7 +230,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (!cp.count) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] No ATI devices found\n" + "[version] plan_class_spec: No AMD GPUs found\n" ); } return false; @@ -245,14 +245,18 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (need_ati_libs) { if (!cp.atirt_detected) { if (config.debug_version_select) { - log_messages.printf(MSG_NORMAL,"[version] [plan_class_spec] ATI run time libraries not found\n"); + log_messages.printf(MSG_NORMAL, + "[version] plan_class_spec: ATI libraries not found\n" + ); } return false; } } else { if (!cp.amdrt_detected) { if (config.debug_version_select) { - log_messages.printf(MSG_NORMAL,"[version] [plan_class_spec] [plan_class_spec] AMD run time libraries not found\n"); + log_messages.printf(MSG_NORMAL, + "[version] plan_class_spec: AMD libraries not found\n" + ); } return false; } @@ -261,7 +265,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (min_cal_target && cp.attribs.target < min_cal_target) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] CAL target less than minimum (%d < %d)\n", + "[version] plan_class_spec: CAL target less than minimum (%d < %d)\n", cp.attribs.target, min_cal_target ); } @@ -270,7 +274,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (max_cal_target && cp.attribs.target > max_cal_target) { if 
(config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] CAL target greater than maximum (%d > %d)\n", + "[version] plan_class_spec: CAL target greater than maximum (%d > %d)\n", cp.attribs.target, max_cal_target ); } @@ -287,7 +291,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (scanned != 3) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] driver version '%s' couldn't be parsed\n", + "[version] plan_class_spec: driver version '%s' couldn't be parsed\n", cp.version ); } @@ -298,7 +302,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { } else { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] no CAL, driver version couldn't be determined\n" + "[version] plan_class_spec: no CAL, driver version couldn't be determined\n" ); } } @@ -312,7 +316,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (!cp.count) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] No NVIDIA devices found\n" + "[version] plan_class_spec: No NVIDIA GPUs found\n" ); } return false; @@ -329,7 +333,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (min_nvidia_compcap && min_nvidia_compcap > v) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] NVIDIA compute capability required min: %d, supplied: %d\n", + "[version] plan_class_spec: NVIDIA compute capability required min: %d, supplied: %d\n", min_nvidia_compcap, v ); } @@ -338,7 +342,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (max_nvidia_compcap && max_nvidia_compcap < v) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] CUDA compute capability required max: %d, supplied: %d\n", + "[version] plan_class_spec: CUDA compute 
capability required max: %d, supplied: %d\n", max_nvidia_compcap, v ); } @@ -349,7 +353,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (min_cuda_version && min_cuda_version > cp.cuda_version) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] CUDA version required min: %d, supplied: %d\n", + "[version] plan_class_spec: CUDA version required min: %d, supplied: %d\n", min_cuda_version, cp.cuda_version ); } @@ -358,7 +362,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (max_cuda_version && max_cuda_version < cp.cuda_version) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] CUDA version required max: %d, supplied: %d\n", + "[version] plan_class_spec: CUDA version required max: %d, supplied: %d\n", max_cuda_version, cp.cuda_version ); } @@ -377,7 +381,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (!cp.count) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] No NVIDIA devices found\n" + "[version] [version] No Intel GPUs found\n" ); } return false; @@ -392,7 +396,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (!cpp->have_opencl) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] GPU doesn't support OpenCL\n" + "[version] GPU doesn't support OpenCL\n" ); } return false; @@ -403,7 +407,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (min_opencl_version && min_opencl_version > cpp->opencl_prop.opencl_device_version_int) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] OpenCL device version required min: %d, supplied: %d\n", + "[version] OpenCL device version required min: %d, supplied: %d\n", min_opencl_version, cpp->opencl_prop.opencl_device_version_int ); } @@ -421,7 
+425,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (min_gpu_ram_mb && min_gpu_ram_mb * MEGA > gpu_ram) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] GPU RAM required min: %f, supplied: %f\n", + "[version] plan_class_spec: GPU RAM required min: %f, supplied: %f\n", min_gpu_ram_mb * MEGA, gpu_ram ); } @@ -433,7 +437,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (min_driver_version > driver_version) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] driver version required min: %d, supplied: %d\n", + "[version] plan_class_spec: driver version required min: %d, supplied: %d\n", abs(min_driver_version), driver_version ); } @@ -444,7 +448,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (max_driver_version < driver_version) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] driver version required max: %d, supplied: %d\n", + "[version] plan_class_spec: driver version required max: %d, supplied: %d\n", abs(max_driver_version), driver_version ); } @@ -514,7 +518,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) { if (config.debug_version_select) { log_messages.printf(MSG_NORMAL, - "[version] [plan_class_spec] host_flops: %e, \tscale: %.2f, \tprojected_flops: %e, \tpeak_flops: %e\n", + "[version] plan_class_spec: host_flops: %e, \tscale: %.2f, \tprojected_flops: %e, \tpeak_flops: %e\n", sreq.host.p_fpops, projected_flops_scale, hu.projected_flops, hu.peak_flops ); diff --git a/sched/sched_types.cpp b/sched/sched_types.cpp index c0c10e569d..1cfcdaa1ab 100644 --- a/sched/sched_types.cpp +++ b/sched/sched_types.cpp @@ -892,16 +892,25 @@ int SCHEDULER_REPLY::write(FILE* fout, SCHEDULER_REQUEST& sreq) { fprintf(fout, "%s", file_transfer_requests[i].c_str()); } - fprintf(fout, - "%d\n" - "%d\n" - "%d\n" - "%d\n", 
- ssp->have_apps_for_proc_type[PROC_TYPE_CPU]?0:1, - ssp->have_apps_for_proc_type[PROC_TYPE_NVIDIA_GPU]?0:1, - ssp->have_apps_for_proc_type[PROC_TYPE_AMD_GPU]?0:1, - ssp->have_apps_for_proc_type[PROC_TYPE_INTEL_GPU]?0:1 - ); + if (g_request->core_client_version < 73000) { + fprintf(fout, + "%d\n" + "%d\n" + "%d\n", + ssp->have_apps_for_proc_type[PROC_TYPE_CPU]?0:1, + ssp->have_apps_for_proc_type[PROC_TYPE_NVIDIA_GPU]?0:1, + ssp->have_apps_for_proc_type[PROC_TYPE_AMD_GPU]?0:1 + ); + } else { + for (i=0; ihave_apps_for_proc_type[i]) { + fprintf(fout, + "%s\n", + proc_type_name_xml(i) + ); + } + } + } gui_urls.get_gui_urls(user, host, team, buf); fputs(buf, fout); if (project_files.text) { diff --git a/vda/sched_vda.cpp b/vda/sched_vda.cpp index 57b84749ae..103ccb82a4 100644 --- a/vda/sched_vda.cpp +++ b/vda/sched_vda.cpp @@ -153,13 +153,30 @@ static int process_completed_upload(char* chunk_name, CHUNK_LIST& chunks) { return 0; } -// process a present file +// Process a present file; possibilities: +// - a download finished +// - this host hasn't communicated in a while, and we deleted the +// VDA_CHUNK_HOST record +// So: // - create a vda_chunk_host record if needed // - set present_on_host flag in vda_chunk_host // - mark our in-memory vda_chunk_host record as "found" // - mark vda_file for update // static int process_present_file(FILE_INFO& fi, CHUNK_LIST& chunks) { + DB_VDA_CHUNK_HOST* ch; + CHUNK_LIST::iterator cli = chunks.find(string(fi.name)); + if (cli == chunks.end()) { + // don't have a record of this chunk on this host; make one + // + ch = new DB_VDA_CHUNK_HOST; + ch->create_time = dtime(); + + } else { + ch = &(cli->second); + } + ch->transfer_in_progress = false; + mark_for_update(ch->vda_file_id); return 0; } @@ -327,22 +344,24 @@ void handle_vda() { } // process completed uploads + // NOTE: completed downloads are handled below // for (i=0; ifile_xfer_results.size(); i++) { RESULT& r = g_request->file_xfer_results[i]; - if (!starts_with(r.name, 
"vda_upload_")) continue; - char* chunk_name = r.name + strlen("vda_upload_"); - if (config.debug_vda) { - log_messages.printf(MSG_NORMAL, - "[vda] DB: completed upload %s\n", chunk_name - ); - } - retval = process_completed_upload(chunk_name, chunks); - if (retval) { - log_messages.printf(MSG_CRITICAL, - "[vda] process_completed_upload(): %d\n", retval - ); - return; + if (strstr(r.name, "vda_upload")) { + char* chunk_name = r.name + strlen("vda_upload_"); + if (config.debug_vda) { + log_messages.printf(MSG_NORMAL, + "[vda] DB: completed upload %s\n", chunk_name + ); + } + retval = process_completed_upload(chunk_name, chunks); + if (retval) { + log_messages.printf(MSG_CRITICAL, + "[vda] process_completed_upload(): %d\n", retval + ); + return; + } } }