diff --git a/checkin_notes b/checkin_notes
index 3a5a6b7573..fbb72b9f0c 100644
--- a/checkin_notes
+++ b/checkin_notes
@@ -7386,7 +7386,59 @@ Rom 7 Dec 2012
     client/
         gpu_detect.cpp
 
-David 7 Dec 2012
+Rom 8 Dec 2012
+    - client: Hook up the XML portion of the Intel GPU detection code so
+      the server scheduler knows about it.
+    - client: Print out the peak flops for the Intel GPU; the regular
+      OpenCL descriptions do not show peak flops.
+
+    NOTE: At this point we should be supporting Intel GPUs as far as
+    detection and reporting their presence to the server goes.  I don't
+    know about scheduling though.
+
+    Thanks Tank Master for the interactive debug session in IRC.
+
+    client/
+        gpu_intel.cpp (Added)
+        client_state.cpp
+        cs_scheduler.cpp
+        cs_statefile.cpp
+        gpu_detect.cpp, .h
+        gpu_opencl.cpp
+        log_flags.cpp
+    lib/
+        cc_config.cpp, .h
+        coproc.cpp, .h
+
+Rom 8 Dec 2012
+    - client/server: fix build breaks I introduced last night with a variable
+      rename.
+
+    client/
+        acct_mgr.cpp
+        cpu_sched.cpp
+        gpu_detect.cpp
+    clientgui/
+        AdvancedFrame.cpp
+        AsyncRPC.cpp
+        BOINCTaskBar.cpp
+        DlgItemProperties.cpp
+    lib/
+        gui_rpc_client.h
+        gui_rpc_client_ops.cpp
+    sched/
+        plan_class_spec.cpp
+        sched_send.cpp
+        sched_types.cpp
+
+Rom 8 Dec 2012
+    - client/server: Make sure the GPU Type field is really classified as an Intel
+      GPU.
+
+    lib/
+        coproc.cpp
+
+David 8 Dec 2012
     - client/manager: tweaks to Intel GPU code
     client/
         acct_mgr.cpp
diff --git a/client/Makefile.am b/client/Makefile.am
index 5c753466ef..56829033cb 100644
--- a/client/Makefile.am
+++ b/client/Makefile.am
@@ -62,6 +62,7 @@ boinc_client_SOURCES = \
     file_xfer.cpp \
     gpu_amd.cpp \
     gpu_detect.cpp \
+    gpu_intel.cpp \
     gpu_nvidia.cpp \
     gpu_opencl.cpp \
     gui_http.cpp \
diff --git a/client/client_state.cpp b/client/client_state.cpp
index 280100d0e3..2dd9177a97 100644
--- a/client/client_state.cpp
+++ b/client/client_state.cpp
@@ -372,7 +372,7 @@ int CLIENT_STATE::init() {
     vector<string> warnings;
     coprocs.get(
         config.use_all_gpus, descs, warnings,
-        config.ignore_nvidia_dev, config.ignore_ati_dev, config.ignore_intel_gpu_dev
+        config.ignore_nvidia_dev, config.ignore_ati_dev, config.ignore_intel_dev
     );
     for (i=0; i<warnings.size(); i++) {
diff --git a/client/cpu_sched.cpp b/client/cpu_sched.cpp
--- a/client/cpu_sched.cpp
+++ b/client/cpu_sched.cpp
@@ -?,? +?,? @@ void assign_coprocs(vector<RESULT*>& jobs) {
     if (coprocs.have_ati()) {
         copy_available_ram(coprocs.ati, GPU_TYPE_ATI);
     }
-    if (coprocs.have_intel_gpu()) {
+    if (coprocs.have_intel()) {
         copy_available_ram(coprocs.intel_gpu, GPU_TYPE_INTEL);
     }
 #endif
diff --git a/client/gpu_detect.cpp b/client/gpu_detect.cpp
index d462179f5f..7b811742e6 100644
--- a/client/gpu_detect.cpp
+++ b/client/gpu_detect.cpp
@@ -64,6 +64,7 @@ void segv_handler(int) {
 
 vector<COPROC_ATI> ati_gpus;
 vector<COPROC_NVIDIA> nvidia_gpus;
+vector<COPROC_INTEL> intel_gpus;
 vector<OPENCL_DEVICE_PROP> ati_opencls;
 vector<OPENCL_DEVICE_PROP> nvidia_opencls;
 vector<OPENCL_DEVICE_PROP> intel_gpu_opencls;
@@ -72,7 +73,7 @@ void COPROCS::get(
     bool use_all,
     vector<string>&descs, vector<string>&warnings,
     vector<int>& ignore_nvidia_dev, vector<int>& ignore_ati_dev,
-    vector<int>& ignore_intel_gpu_dev
+    vector<int>& ignore_intel_dev
 ) {
     unsigned int i;
     char buf[256], buf2[256];
@@ -91,7 +92,13 @@ void COPROCS::get(
         warnings.push_back("Caught SIGSEGV in ATI GPU detection");
     }
     try {
-        get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_gpu_dev);
+        intel_gpu.get(use_all, warnings, ignore_intel_dev);
+    }
+    catch (...) {
+        warnings.push_back("Caught SIGSEGV in INTEL GPU detection");
+    }
+    try {
+        get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_dev);
     }
     catch (...) {
         warnings.push_back("Caught SIGSEGV in OpenCL detection");
@@ -110,10 +117,15 @@ void COPROCS::get(
         ati.get(use_all, warnings, ignore_ati_dev);
     }
 #endif
+    if (setjmp(resume)) {
+        warnings.push_back("Caught SIGSEGV in INTEL GPU detection");
+    } else {
+        intel_gpu.get(use_all, warnings, ignore_intel_dev);
+    }
     if (setjmp(resume)) {
         warnings.push_back("Caught SIGSEGV in OpenCL detection");
     } else {
-        get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_gpu_dev);
+        get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_dev);
     }
     signal(SIGSEGV, old_sig);
 #endif
@@ -129,7 +141,11 @@ void COPROCS::get(
             break;
         case COPROC_UNUSED:
         default:
-            sprintf(buf2, "NVIDIA GPU %d (not used): %s", nvidia_gpus[i].device_num, buf);
+            if (nvidia_gpus[i].opencl_prop.is_used) {
+                sprintf(buf2, "NVIDIA GPU %d (OpenCL only): %s", nvidia_gpus[i].device_num, buf);
+            } else {
+                sprintf(buf2, "NVIDIA GPU %d (not used): %s", nvidia_gpus[i].device_num, buf);
+            }
             break;
         }
         descs.push_back(string(buf2));
@@ -146,7 +162,32 @@ void COPROCS::get(
             break;
         case COPROC_UNUSED:
         default:
-            sprintf(buf2, "ATI GPU %d: (not used) %s", ati_gpus[i].device_num, buf);
+            if (ati_gpus[i].opencl_prop.is_used) {
+                sprintf(buf2, "ATI GPU %d: (OpenCL only) %s", ati_gpus[i].device_num, buf);
+            } else {
+                sprintf(buf2, "ATI GPU %d: (not used) %s", ati_gpus[i].device_num, buf);
+            }
             break;
         }
         descs.push_back(string(buf2));
     }
+
+    for (i=0; i<intel_gpus.size(); i++) {
+        intel_gpus[i].description(buf);
+        switch(intel_gpus[i].is_used) {
+        case COPROC_IGNORED:
+            sprintf(buf2, "INTEL GPU %d (ignored by config): %s", intel_gpus[i].device_num, buf);
+            break;
+        case COPROC_USED:
+            sprintf(buf2, "INTEL GPU %d: %s", intel_gpus[i].device_num, buf);
+            break;
+        case COPROC_UNUSED:
+        default:
+            sprintf(buf2, "INTEL GPU %d: (not used) %s", intel_gpus[i].device_num, buf);
+            break;
+        }
+        descs.push_back(string(buf2));
+    }
diff --git a/client/gpu_detect.h b/client/gpu_detect.h
--- a/client/gpu_detect.h
+++ b/client/gpu_detect.h
@@ -?,? +?,? @@
 extern vector<COPROC_ATI> ati_gpus;
 extern vector<COPROC_NVIDIA> nvidia_gpus;
+extern vector<COPROC_INTEL> intel_gpus;
 extern vector<OPENCL_DEVICE_PROP> nvidia_opencls;
 extern vector<OPENCL_DEVICE_PROP> ati_opencls;
 extern vector<OPENCL_DEVICE_PROP> intel_gpu_opencls;
diff --git a/client/gpu_intel.cpp b/client/gpu_intel.cpp
new file mode 100644
index 0000000000..7c522affb1
--- /dev/null
+++ b/client/gpu_intel.cpp
@@ -0,0 +1,50 @@
+// This file is part of BOINC.
+// http://boinc.berkeley.edu
+// Copyright (C) 2012 University of California
+//
+// BOINC is free software; you can redistribute it and/or modify it
+// under the terms of the GNU Lesser General Public License
+// as published by the Free Software Foundation,
+// either version 3 of the License, or (at your option) any later version.
+//
+// BOINC is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+// See the GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with BOINC.  If not, see <http://www.gnu.org/licenses/>.
+
+// Detection of Intel GPUs
+//
+
+#ifdef _WIN32
+#include "boinc_win.h"
+#else
+#ifdef __APPLE__
+// Suppress obsolete warning when building for OS 10.3.9
+#define DLOPEN_NO_WARN
+#include <mach-o/dyld.h>
+#endif
+#include "config.h"
+#include <dlfcn.h>
+#endif
+
+#include <vector>
+#include <string>
+
+using std::vector;
+using std::string;
+
+#include "coproc.h"
+#include "util.h"
+
+#include "client_msgs.h"
+#include "gpu_detect.h"
+
+void COPROC_INTEL::get(
+    bool,
+    vector<string>&,
+    vector<int>&
+) {
+}
diff --git a/client/gpu_opencl.cpp b/client/gpu_opencl.cpp
index fe0bd38098..0e0588155b 100644
--- a/client/gpu_opencl.cpp
+++ b/client/gpu_opencl.cpp
@@ -115,7 +115,7 @@ void COPROCS::get_opencl(
     vector<string>& warnings,
     vector<int>& ignore_ati_dev,
     vector<int>& ignore_nvidia_dev,
-    vector<int>& ignore_intel_gpu_dev
+    vector<int>& ignore_intel_dev
 ) {
     cl_int ciErrNum;
     cl_platform_id platforms[MAX_OPENCL_PLATFORMS];
@@ -337,11 +337,21 @@ void COPROCS::get_opencl(
             COPROC_INTEL c;
             c.opencl_prop = prop;
+            c.is_used = COPROC_UNUSED;
+            c.available_ram = prop.global_mem_size;
+            strcpy(c.name, prop.name);
+            strcpy(c.version, prop.opencl_driver_version);
+
             c.set_peak_flops();
             prop.peak_flops = c.peak_flops;
-            prop.opencl_available_ram = prop.global_mem_size;
+
             intel_gpu_opencls.push_back(prop);
+
+            // At present Intel GPUs only support OpenCL and do not have a native
+            // GPGPU framework, so treat each detected Intel OpenCL GPU device as
+            // a native device.
+            intel_gpus.push_back(c);
         }
     }
 }
@@ -388,7 +398,7 @@ void COPROCS::get_opencl(
         strcpy(ati.name, ati.opencl_prop.name);
     }
 
-    intel_gpu.find_best_opencls(use_all, intel_gpu_opencls, ignore_intel_gpu_dev);
+    intel_gpu.find_best_opencls(use_all, intel_gpu_opencls, ignore_intel_dev);
     intel_gpu.available_ram = intel_gpu.opencl_prop.global_mem_size;
     strcpy(intel_gpu.name, intel_gpu.opencl_prop.name);
diff --git a/client/log_flags.cpp b/client/log_flags.cpp
index a51aa7779c..b233b79771 100644
--- a/client/log_flags.cpp
+++ b/client/log_flags.cpp
@@ -191,7 +191,7 @@ void CONFIG::show() {
     }
     show_gpu_ignore(ignore_nvidia_dev, GPU_TYPE_NVIDIA);
     show_gpu_ignore(ignore_ati_dev, GPU_TYPE_ATI);
-    show_gpu_ignore(ignore_intel_gpu_dev, GPU_TYPE_ATI);
+    show_gpu_ignore(ignore_intel_dev, GPU_TYPE_INTEL);
     for (i=0; i<exclusive_apps.size(); i++) {
diff --git a/clientgui/AdvancedFrame.cpp b/clientgui/AdvancedFrame.cpp
--- a/clientgui/AdvancedFrame.cpp
+++ b/clientgui/AdvancedFrame.cpp
@@ -?,? +?,? @@
-    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
 #ifndef __WXGTK__
         menuActivity->AppendSeparator();
@@ -1822,7 +1822,7 @@ void CAdvancedFrame::OnFrameRender(wxTimerEvent& WXUNUSED(event)) {
     CC_STATUS  status;
     if ((pDoc->IsConnected()) && (0 == pDoc->GetCoreClientStatus(status))) {
         UpdateActivityModeControls(status);
-        if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+        if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
             UpdateGPUModeControls(status);
         }
         UpdateNetworkModeControls(status);
diff --git a/clientgui/AsyncRPC.cpp b/clientgui/AsyncRPC.cpp
index 1047437d7d..6832333ad1 100644
--- a/clientgui/AsyncRPC.cpp
+++ b/clientgui/AsyncRPC.cpp
@@ -1006,7 +1006,7 @@ void CMainDocument::HandleCompletedRPC() {
             exchangeBuf->executing_as_daemon = arg1->executing_as_daemon;
             exchangeBuf->have_nvidia = arg1->have_nvidia;
             exchangeBuf->have_ati = arg1->have_ati;
-            exchangeBuf->have_intel_gpu = arg1->have_intel_gpu;
+            exchangeBuf->have_intel = arg1->have_intel;
         }
         break;
     case RPC_GET_RESULTS:
diff --git a/clientgui/BOINCTaskBar.cpp b/clientgui/BOINCTaskBar.cpp
index 02242a5eb7..99d589205b 100644
--- a/clientgui/BOINCTaskBar.cpp
+++ b/clientgui/BOINCTaskBar.cpp
@@ -523,7 +523,7 @@ wxMenu *CTaskBarIcon::BuildContextMenu() {
 
     pMenu->AppendSeparator();
     m_SnoozeMenuItem = pMenu->AppendCheckItem(ID_TB_SUSPEND, _("Snooze"), wxEmptyString);
-    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
         m_SnoozeGPUMenuItem = pMenu->AppendCheckItem(ID_TB_SUSPEND_GPU, _("Snooze GPU"), wxEmptyString);
     }
 
@@ -633,7 +633,7 @@ void CTaskBarIcon::AdjustMenuItems(wxMenu* pMenu) {
         }
     }
 
-    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
         switch (status.gpu_mode) {
         case RUN_MODE_NEVER:
             switch (status.gpu_mode_perm) {
@@ -660,7 +660,7 @@ void CTaskBarIcon::AdjustMenuItems(wxMenu* pMenu) {
         }
         break;
     }
-    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
         if (status.task_mode == RUN_MODE_NEVER) {
             m_SnoozeGPUMenuItem->Check(false);
             m_SnoozeGPUMenuItem->Enable(false);
@@ -724,7 +724,7 @@ void CTaskBarIcon::UpdateTaskbarStatus() {
     }
     strMessage += wxT(".\n");
 
-    if (!comp_suspended && (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu)) {
+    if (!comp_suspended && (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel)) {
         switch(status.gpu_suspend_reason) {
         case 0:
             strMessage += _("GPU computing is enabled");
diff --git a/clientgui/DlgItemProperties.cpp b/clientgui/DlgItemProperties.cpp
index 648fdcb6ef..b6822fe1e4 100644
--- a/clientgui/DlgItemProperties.cpp
+++ b/clientgui/DlgItemProperties.cpp
@@ -280,7 +280,7 @@ void CDlgItemProperties::renderInfos(PROJECT* project_in) {
             project->rsc_desc_ati
         );
     }
-    if (pDoc->state.have_intel_gpu) {
+    if (pDoc->state.have_intel) {
         show_rsc(
             wxString(proc_type_name(PROC_TYPE_INTEL_GPU), wxConvUTF8),
             project->rsc_desc_intel_gpu
diff --git a/lib/cc_config.cpp b/lib/cc_config.cpp
index 81dd0efaed..26f18a275e 100644
--- a/lib/cc_config.cpp
+++ b/lib/cc_config.cpp
@@ -218,7 +218,7 @@ void CONFIG::defaults() {
     http_transfer_timeout_bps = 10;
     ignore_nvidia_dev.clear();
     ignore_ati_dev.clear();
-    ignore_intel_gpu_dev.clear();
+    ignore_intel_dev.clear();
     max_file_xfers = 8;
     max_file_xfers_per_project = 2;
     max_stderr_file_size = 0;
@@ -288,7 +288,7 @@ int CONFIG::parse_options(XML_PARSER& xp) {
     exclusive_gpu_apps.clear();
     ignore_nvidia_dev.clear();
     ignore_ati_dev.clear();
-    ignore_intel_gpu_dev.clear();
+    ignore_intel_dev.clear();
     exclude_gpus.clear();
 
     while (!xp.get_tag()) {
@@ -372,7 +372,7 @@ int CONFIG::parse_options(XML_PARSER& xp) {
             continue;
         }
         if (xp.parse_int("ignore_intel_gpu_dev", n)) {
-            ignore_intel_gpu_dev.push_back(n);
+            ignore_intel_dev.push_back(n);
             continue;
         }
         if (xp.parse_int("max_file_xfers", max_file_xfers)) continue;
@@ -558,10 +558,10 @@ int CONFIG::write(MIOFILE& out, LOG_FLAGS& log_flags) {
         );
     }
 
-    for (i=0; i<ignore_intel_gpu_dev.size(); i++) {
+    for (i=0; i<ignore_intel_dev.size(); i++) {
         out.printf(
             "        <ignore_intel_gpu_dev>%d</ignore_intel_gpu_dev>\n",
-            ignore_intel_gpu_dev[i]
+            ignore_intel_dev[i]
         );
     }
diff --git a/lib/cc_config.h b/lib/cc_config.h
index f1a32a0456..3b1187acbd 100644
--- a/lib/cc_config.h
+++ b/lib/cc_config.h
@@ -157,7 +157,7 @@ struct CONFIG {
     int http_transfer_timeout;
     std::vector<int> ignore_ati_dev;
     std::vector<int> ignore_nvidia_dev;
-    std::vector<int> ignore_intel_gpu_dev;
+    std::vector<int> ignore_intel_dev;
     int max_file_xfers;
     int max_file_xfers_per_project;
     int max_stderr_file_size;
diff --git a/lib/coproc.cpp b/lib/coproc.cpp
index 5b6fa92468..cd4b0de2d4 100644
--- a/lib/coproc.cpp
+++ b/lib/coproc.cpp
@@ -324,6 +324,12 @@ void COPROCS::summary_string(char* buf, int len) {
         );
         strlcat(buf, buf2, len);
     }
+    if (intel_gpu.count) {
+        sprintf(buf2,"[INTEL|%s|%d|%fMB|%s]",
+            intel_gpu.name, intel_gpu.count, intel_gpu.opencl_prop.global_mem_size/MEGA, intel_gpu.version
+        );
+        strlcat(buf, buf2, len);
+    }
 }
 
 int COPROCS::parse(XML_PARSER& xp) {
@@ -936,7 +942,7 @@ void COPROC_INTEL::write_xml(MIOFILE& f, bool scheduler_rpc) {
 
 void COPROC_INTEL::clear() {
     COPROC::clear();
-    strcpy(type, proc_type_name_xml(PROC_TYPE_AMD_GPU));
+    strcpy(type, proc_type_name_xml(PROC_TYPE_INTEL_GPU));
     estimated_delay = -1;
     strcpy(name, "");
     strcpy(version, "");
@@ -976,6 +982,11 @@ int COPROC_INTEL::parse(XML_PARSER& xp) {
     return ERR_XML_PARSE;
 }
 
+void COPROC_INTEL::description(char* buf) {
+    sprintf(buf, "%s (version %s, %.0fMB, %.0fMB available, %.0f GFLOPS peak)",
+        name, version, ((double)opencl_prop.global_mem_size)/MEGA, available_ram/MEGA, peak_flops/1.e9
+    );
+}
 
 // http://en.wikipedia.org/wiki/Comparison_of_Intel_graphics_processing_units says:
 // The raw performance of integrated GPU, in single-precision FLOPS,
@@ -990,12 +1001,12 @@ void COPROC_INTEL::set_peak_flops() {
     if (opencl_prop.max_compute_units) {
         x = opencl_prop.max_compute_units * 8 * opencl_prop.max_clock_frequency * 1e6;
    }
-    peak_flops = (x>0)?x:45e9;
+    peak_flops = (x>0)?x:45e9;
 }
 
 //TODO: Fix this
 void COPROC_INTEL::fake(double ram, double avail_ram, int n) {
-    strcpy(type, proc_type_name_xml(PROC_TYPE_AMD_GPU));
+    strcpy(type, proc_type_name_xml(PROC_TYPE_INTEL_GPU));
     strcpy(version, "1.4.3");
     strcpy(name, "foobar");
     count = n;
@@ -1013,7 +1024,7 @@ const char* proc_type_name_xml(int pt) {
     case PROC_TYPE_CPU: return "CPU";
     case PROC_TYPE_NVIDIA_GPU: return "NVIDIA";
     case PROC_TYPE_AMD_GPU: return "ATI";
-    case PROC_TYPE_INTEL_GPU: return "intel_gpu";
+    case PROC_TYPE_INTEL_GPU: return "INTEL";
     }
     return "unknown";
 }
diff --git a/lib/coproc.h b/lib/coproc.h
index 765b23c268..6e059b1b4b 100644
--- a/lib/coproc.h
+++ b/lib/coproc.h
@@ -166,7 +166,7 @@ struct OPENCL_DEVICE_PROP {
     void write_xml(MIOFILE&);
 #endif
     int parse(XML_PARSER&);
-void description(char* buf, const char* type);
+    void description(char* buf, const char* type);
 };
 
@@ -378,6 +378,7 @@ struct COPROC_ATI : public COPROC {
 struct COPROC_INTEL : public COPROC {
     char name[256];
     char version[50];
+    double global_mem_size;
     COPROC_USAGE is_used;    // temp used in scan process
 #ifndef _USING_FCGI_
@@ -386,6 +387,12 @@ struct COPROC_INTEL : public COPROC {
     COPROC_INTEL(): COPROC() {
         strcpy(type, proc_type_name_xml(PROC_TYPE_INTEL_GPU));
     }
+    void get(
+        bool use_all,
+        std::vector<std::string>&,
+        std::vector<int>& ignore_devs
+    );
+    void description(char*);
     void clear();
     int parse(XML_PARSER&);
     void set_peak_flops();
@@ -446,6 +453,7 @@ struct COPROCS {
         }
         nvidia.clear();
         ati.clear();
+        intel_gpu.clear();
         COPROC c;
         strcpy(c.type, "CPU");
         add(c);
diff --git a/lib/gui_rpc_client.h b/lib/gui_rpc_client.h
index d91584c82e..030eae66c4 100644
--- a/lib/gui_rpc_client.h
+++ b/lib/gui_rpc_client.h
@@ -389,7 +389,7 @@ struct CC_STATE {
     TIME_STATS time_stats;
     bool have_nvidia;    // deprecated; include for compat (set by <have_cuda>)
     bool have_ati;       // deprecated; include for compat
-    bool have_intel_gpu;
+    bool have_intel;
 
     CC_STATE();
     ~CC_STATE();
diff --git a/lib/gui_rpc_client_ops.cpp b/lib/gui_rpc_client_ops.cpp
index d0fa3eaafa..79fc590e9e 100644
--- a/lib/gui_rpc_client_ops.cpp
+++ b/lib/gui_rpc_client_ops.cpp
@@ -982,7 +982,7 @@ int CC_STATE::parse(XML_PARSER& xp) {
         }
         if (xp.parse_bool("have_cuda", have_nvidia)) continue;
         if (xp.parse_bool("have_ati", have_ati)) continue;
-        if (xp.parse_bool("have_intel_gpu", have_intel_gpu)) continue;
+        if (xp.parse_bool("have_intel", have_intel)) continue;
     }
     return 0;
 }
@@ -1014,7 +1014,7 @@ void CC_STATE::clear() {
     host_info.clear_host_info();
     have_nvidia = false;
     have_ati = false;
-    have_intel_gpu = false;
+    have_intel = false;
 }
 
 PROJECT* CC_STATE::lookup_project(const char* url) {
diff --git a/sched/plan_class_spec.cpp b/sched/plan_class_spec.cpp
index ebea4e5b6c..5a36663275 100644
--- a/sched/plan_class_spec.cpp
+++ b/sched/plan_class_spec.cpp
@@ -494,7 +494,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
         } else if (!strcmp(gpu_type, "nvidia")) {
             hu.proc_type = PROC_TYPE_NVIDIA_GPU;
             hu.gpu_usage = gpu_usage;
-        } else if (!strcmp(gpu_type, "intel_gpu")) {
+        } else if (!strcmp(gpu_type, "intel")) {
             hu.proc_type = PROC_TYPE_INTEL_GPU;
             hu.gpu_usage = gpu_usage;
         }
diff --git a/sched/sched_types.cpp b/sched/sched_types.cpp
index dce64735c3..dc69bb1cae 100644
--- a/sched/sched_types.cpp
+++ b/sched/sched_types.cpp
@@ -98,7 +98,7 @@ int CLIENT_APP_VERSION::parse(XML_PARSER& xp) {
                 host_usage.proc_type = PROC_TYPE_NVIDIA_GPU;
             } else if (!strcmp(coproc_req.type, "ATI")) {
                 host_usage.proc_type = PROC_TYPE_AMD_GPU;
-            } else if (!strcmp(coproc_req.type, "INTEL_GPU")) {
+            } else if (!strcmp(coproc_req.type, "INTEL")) {
                 host_usage.proc_type = PROC_TYPE_INTEL_GPU;
             }
         }
diff --git a/win_build/boinc_cli.vcproj b/win_build/boinc_cli.vcproj
index deea5757c7..de5f0e32cf 100644
--- a/win_build/boinc_cli.vcproj
+++ b/win_build/boinc_cli.vcproj
@@ -691,6 +691,10 @@
 				RelativePath="..\client\gpu_detect.cpp"
 				>
 			</File>
+			<File
+				RelativePath="..\client\gpu_intel.cpp"
+				>
+			</File>
 			<File
 				RelativePath="..\client\gpu_nvidia.cpp"
 				>
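
For reference, a minimal standalone sketch (not from the BOINC tree) of the peak-FLOPS estimate that COPROC_INTEL::set_peak_flops() applies in the lib/coproc.cpp hunk above: 8 single-precision FLOPS per execution unit per clock, with a 45 GFLOPS fallback when OpenCL reports no compute units. The device values used in main() are illustrative assumptions, not taken from the patch.

// Illustrative sketch only: mirrors the estimate in COPROC_INTEL::set_peak_flops(),
// assuming max_compute_units and max_clock_frequency (MHz) come from OpenCL properties.
#include <cstdio>

static double intel_gpu_peak_flops(int max_compute_units, int max_clock_frequency_mhz) {
    double x = 0;
    if (max_compute_units) {
        // 8 single-precision FLOPS per EU per clock; clock is reported in MHz
        x = max_compute_units * 8.0 * max_clock_frequency_mhz * 1e6;
    }
    return (x > 0) ? x : 45e9;    // fall back to 45 GFLOPS if nothing was reported
}

int main() {
    // Example: a 16-EU part at 1150 MHz works out to roughly 147 GFLOPS
    std::printf("%.1f GFLOPS\n", intel_gpu_peak_flops(16, 1150) / 1e9);
    return 0;
}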