diff --git a/checkin_notes b/checkin_notes
index da60c95e11..944f7d1be2 100644
--- a/checkin_notes
+++ b/checkin_notes
@@ -7409,3 +7409,24 @@ Rom 8 Dec 2012
     lib\
         cc_config.cpp, .h
         coproc.cpp, .h
+
+Rom 8 Dec 2012
+    - client/server: fix build breaks I introduced last night with a variable
+        rename.
+
+    client\
+        acct_mgr.cpp
+        cpu_sched.cpp
+        gpu_detect.cpp
+    clientgui\
+        AdvancedFrame.cpp
+        AsyncRPC.cpp
+        BOINCTaskBar.cpp
+        DlgItemProperties.cpp
+    lib\
+        gui_rpc_client.h
+        gui_rpc_client_ops.cpp
+    sched\
+        plan_class_spec.cpp
+        sched_send.cpp
+        sched_types.cpp
diff --git a/client/acct_mgr.cpp b/client/acct_mgr.cpp
index fcbe849126..1dedb10b4f 100644
--- a/client/acct_mgr.cpp
+++ b/client/acct_mgr.cpp
@@ -269,7 +269,7 @@ int AM_ACCOUNT::parse(XML_PARSER& xp) {
             handle_no_rsc(GPU_TYPE_NVIDIA, btemp);
             continue;
         }
-        if (xp.parse_bool("no_intel_gpu", btemp)) {
+        if (xp.parse_bool("no_intel", btemp)) {
             handle_no_rsc(GPU_TYPE_INTEL, btemp);
             continue;
         }
diff --git a/client/cpu_sched.cpp b/client/cpu_sched.cpp
index ddfc82d9bb..ef7d6f7552 100644
--- a/client/cpu_sched.cpp
+++ b/client/cpu_sched.cpp
@@ -1347,7 +1347,7 @@ static inline void assign_coprocs(vector& jobs) {
     if (coprocs.have_ati()) {
         copy_available_ram(coprocs.ati, GPU_TYPE_ATI);
     }
-    if (coprocs.have_intel_gpu()) {
+    if (coprocs.have_intel()) {
         copy_available_ram(coprocs.intel_gpu, GPU_TYPE_INTEL);
     }
 #endif
diff --git a/client/gpu_detect.cpp b/client/gpu_detect.cpp
index 685e67f753..07c3c5a7d1 100644
--- a/client/gpu_detect.cpp
+++ b/client/gpu_detect.cpp
@@ -125,7 +125,7 @@ void COPROCS::get(
     if (setjmp(resume)) {
         warnings.push_back("Caught SIGSEGV in OpenCL detection");
     } else {
-        get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_gpu_dev);
+        get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_dev);
     }
     signal(SIGSEGV, old_sig);
 #endif
@@ -141,7 +141,11 @@ void COPROCS::get(
             break;
         case COPROC_UNUSED:
         default:
-            sprintf(buf2, "NVIDIA GPU %d (not used): %s", nvidia_gpus[i].device_num, buf);
+            if (nvidia_gpus[i].opencl_prop.is_used) {
+                sprintf(buf2, "NVIDIA GPU %d (OpenCL only): %s", nvidia_gpus[i].device_num, buf);
+            } else {
+                sprintf(buf2, "NVIDIA GPU %d (not used): %s", nvidia_gpus[i].device_num, buf);
+            }
             break;
         }
         descs.push_back(string(buf2));
@@ -158,7 +162,11 @@ void COPROCS::get(
             break;
         case COPROC_UNUSED:
         default:
-            sprintf(buf2, "ATI GPU %d: (not used) %s", ati_gpus[i].device_num, buf);
+            if (ati_gpus[i].opencl_prop.is_used) {
+                sprintf(buf2, "ATI GPU %d: (OpenCL only) %s", ati_gpus[i].device_num, buf);
+            } else {
+                sprintf(buf2, "ATI GPU %d: (not used) %s", ati_gpus[i].device_num, buf);
+            }
             break;
         }
         descs.push_back(string(buf2));
@@ -175,7 +183,11 @@ void COPROCS::get(
             break;
         case COPROC_UNUSED:
         default:
-            sprintf(buf2, "INTEL GPU %d: (not used) %s", intel_gpus[i].device_num, buf);
+            if (intel_gpus[i].opencl_prop.is_used) {
+                sprintf(buf2, "INTEL GPU %d: (OpenCL only) %s", intel_gpus[i].device_num, buf);
+            } else {
+                sprintf(buf2, "INTEL GPU %d: (not used) %s", intel_gpus[i].device_num, buf);
+            }
             break;
         }
         descs.push_back(string(buf2));
diff --git a/clientgui/AdvancedFrame.cpp b/clientgui/AdvancedFrame.cpp
index bb7dedae50..87e6c12c9b 100644
--- a/clientgui/AdvancedFrame.cpp
+++ b/clientgui/AdvancedFrame.cpp
@@ -470,7 +470,7 @@ bool CAdvancedFrame::CreateMenu() {
         _("Stop work regardless of preferences")
     );
-    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
 #ifndef __WXGTK__
         menuActivity->AppendSeparator();
@@ -1822,7 +1822,7 @@ void CAdvancedFrame::OnFrameRender(wxTimerEvent& WXUNUSED(event)) {
     CC_STATUS status;
     if ((pDoc->IsConnected()) && (0 == pDoc->GetCoreClientStatus(status))) {
         UpdateActivityModeControls(status);
-        if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+        if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
             UpdateGPUModeControls(status);
         }
         UpdateNetworkModeControls(status);
diff --git a/clientgui/AsyncRPC.cpp b/clientgui/AsyncRPC.cpp
index 1047437d7d..6832333ad1 100644
--- a/clientgui/AsyncRPC.cpp
+++ b/clientgui/AsyncRPC.cpp
@@ -1006,7 +1006,7 @@ void CMainDocument::HandleCompletedRPC() {
             exchangeBuf->executing_as_daemon = arg1->executing_as_daemon;
             exchangeBuf->have_nvidia = arg1->have_nvidia;
             exchangeBuf->have_ati = arg1->have_ati;
-            exchangeBuf->have_intel_gpu = arg1->have_intel_gpu;
+            exchangeBuf->have_intel = arg1->have_intel;
         }
         break;
     case RPC_GET_RESULTS:
diff --git a/clientgui/BOINCTaskBar.cpp b/clientgui/BOINCTaskBar.cpp
index 02242a5eb7..99d589205b 100644
--- a/clientgui/BOINCTaskBar.cpp
+++ b/clientgui/BOINCTaskBar.cpp
@@ -523,7 +523,7 @@ wxMenu *CTaskBarIcon::BuildContextMenu() {
     pMenu->AppendSeparator();
     m_SnoozeMenuItem = pMenu->AppendCheckItem(ID_TB_SUSPEND, _("Snooze"), wxEmptyString);
-    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
         m_SnoozeGPUMenuItem = pMenu->AppendCheckItem(ID_TB_SUSPEND_GPU, _("Snooze GPU"), wxEmptyString);
     }
@@ -633,7 +633,7 @@ void CTaskBarIcon::AdjustMenuItems(wxMenu* pMenu) {
         }
     }
-    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
         switch (status.gpu_mode) {
         case RUN_MODE_NEVER:
             switch (status.gpu_mode_perm) {
@@ -660,7 +660,7 @@ void CTaskBarIcon::AdjustMenuItems(wxMenu* pMenu) {
         }
         break;
     }
-    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
         if (status.task_mode == RUN_MODE_NEVER) {
             m_SnoozeGPUMenuItem->Check(false);
             m_SnoozeGPUMenuItem->Enable(false);
@@ -724,7 +724,7 @@ void CTaskBarIcon::UpdateTaskbarStatus() {
     }
     strMessage += wxT(".\n");
-    if (!comp_suspended && (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu)) {
+    if (!comp_suspended && (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel)) {
         switch(status.gpu_suspend_reason) {
         case 0:
             strMessage += _("GPU computing is enabled");
diff --git a/clientgui/DlgItemProperties.cpp b/clientgui/DlgItemProperties.cpp
index 648fdcb6ef..b6822fe1e4 100644
--- a/clientgui/DlgItemProperties.cpp
+++ b/clientgui/DlgItemProperties.cpp
@@ -280,7 +280,7 @@ void CDlgItemProperties::renderInfos(PROJECT* project_in) {
             project->rsc_desc_ati
         );
     }
-    if (pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_intel) {
         show_rsc(
             wxString(proc_type_name(PROC_TYPE_INTEL_GPU), wxConvUTF8),
             project->rsc_desc_intel_gpu
diff --git a/lib/gui_rpc_client.h b/lib/gui_rpc_client.h
index d91584c82e..030eae66c4 100644
--- a/lib/gui_rpc_client.h
+++ b/lib/gui_rpc_client.h
@@ -389,7 +389,7 @@ struct CC_STATE {
     TIME_STATS time_stats;
     bool have_nvidia;       // deprecated; include for compat (set by <have_cuda>)
     bool have_ati;          // deprecated; include for compat
-    bool have_intel_gpu;
+    bool have_intel;
     CC_STATE();
     ~CC_STATE();
diff --git a/lib/gui_rpc_client_ops.cpp b/lib/gui_rpc_client_ops.cpp
index d0fa3eaafa..79fc590e9e 100644
--- a/lib/gui_rpc_client_ops.cpp
+++ b/lib/gui_rpc_client_ops.cpp
@@ -982,7 +982,7 @@ int CC_STATE::parse(XML_PARSER& xp) {
         }
         if (xp.parse_bool("have_cuda", have_nvidia)) continue;
         if (xp.parse_bool("have_ati", have_ati)) continue;
-        if (xp.parse_bool("have_intel_gpu", have_intel_gpu)) continue;
+        if (xp.parse_bool("have_intel", have_intel)) continue;
     }
     return 0;
 }
@@ -1014,7 +1014,7 @@ void CC_STATE::clear() {
     host_info.clear_host_info();
     have_nvidia = false;
     have_ati = false;
-    have_intel_gpu = false;
+    have_intel = false;
 }
 PROJECT* CC_STATE::lookup_project(const char* url) {
diff --git a/sched/plan_class_spec.cpp b/sched/plan_class_spec.cpp
index ebea4e5b6c..92ed1058cc 100644
--- a/sched/plan_class_spec.cpp
+++ b/sched/plan_class_spec.cpp
@@ -375,7 +375,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
     // Intel GPU
     //
     } else if (!strcmp(gpu_type, "intel")) {
-        COPROC& cp = sreq.coprocs.intel_gpu;
+        COPROC& cp = sreq.coprocs.intel;
         cpp = &cp;
         if (!cp.count) {
@@ -494,7 +494,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
     } else if (!strcmp(gpu_type, "nvidia")) {
         hu.proc_type = PROC_TYPE_NVIDIA_GPU;
         hu.gpu_usage = gpu_usage;
-    } else if (!strcmp(gpu_type, "intel_gpu")) {
+    } else if (!strcmp(gpu_type, "intel")) {
         hu.proc_type = PROC_TYPE_INTEL_GPU;
         hu.gpu_usage = gpu_usage;
     }
diff --git a/sched/sched_send.cpp b/sched/sched_send.cpp
index 069d07c815..414082c5f2 100644
--- a/sched/sched_send.cpp
+++ b/sched/sched_send.cpp
@@ -1512,9 +1512,9 @@ void send_gpu_messages() {
             proc_type_name(PROC_TYPE_AMD_GPU)
         );
     }
-    if (g_request->coprocs.intel_gpu.count && ssp->have_apps_for_proc_type[PROC_TYPE_INTEL_GPU]) {
+    if (g_request->coprocs.intel.count && ssp->have_apps_for_proc_type[PROC_TYPE_INTEL_GPU]) {
         send_gpu_property_messages(gpu_requirements[PROC_TYPE_INTEL_GPU],
-            g_request->coprocs.intel_gpu.opencl_prop.global_mem_size,
+            g_request->coprocs.intel.opencl_prop.global_mem_size,
             0, proc_type_name(PROC_TYPE_INTEL_GPU)
         );
diff --git a/sched/sched_types.cpp b/sched/sched_types.cpp
index dce64735c3..dc69bb1cae 100644
--- a/sched/sched_types.cpp
+++ b/sched/sched_types.cpp
@@ -98,7 +98,7 @@ int CLIENT_APP_VERSION::parse(XML_PARSER& xp) {
             host_usage.proc_type = PROC_TYPE_NVIDIA_GPU;
         } else if (!strcmp(coproc_req.type, "ATI")) {
             host_usage.proc_type = PROC_TYPE_AMD_GPU;
-        } else if (!strcmp(coproc_req.type, "INTEL_GPU")) {
+        } else if (!strcmp(coproc_req.type, "INTEL")) {
             host_usage.proc_type = PROC_TYPE_INTEL_GPU;
         }
     }