Merge branch 'master' of ssh://boinc.berkeley.edu/boinc

Conflicts:
	checkin_notes
	client/acct_mgr.cpp
	client/cs_statefile.cpp
	client/gpu_opencl.cpp
	lib/coproc.cpp

Additional changes:
	client/Makefile.am

Dropped changes:
	client/cs_scheduler.cpp (516eff6)
	sched/sched_send.cpp (2dd8288)
Oliver Bock 2013-03-04 15:32:16 +01:00
commit 508b9b572b
22 changed files with 218 additions and 39 deletions

checkin_notes

@ -7386,7 +7386,59 @@ Rom 7 Dec 2012
client/
gpu_detect.cpp
David 7 Dec 2012
Rom 8 Dec 2012
- client: Hook up the XML portion of the Intel GPU detection code so
the server scheduler knows about it.
- client: Print out the peak flops for the Intel GPU, since the regular
OpenCL descriptions do not show peak flops.
NOTE: At this point we should be supporting Intel GPUs as far as
detection and reporting their presence to the server go. I don't
know about scheduling, though.
Thanks Tank Master for the interactive debug session in IRC.
client/
gpu_intel.cpp (Added)
client_state.cpp
cs_scheduler.cpp
cs_statefile.cpp
gpu_detect.cpp, .h
gpu_opencl.cpp
log_flags.cpp
lib/
cc_config.cpp, .h
coproc.cpp, .h
Rom 8 Dec 2012
- client/server: fix build breaks I introduced last night with a variable
rename.
client/
acct_mgr.cpp
cpu_sched.cpp
gpu_detect.cpp
clientgui/
AdvancedFrame.cpp
AsyncRPC.cpp
BOINCTaskBar.cpp
DlgItemProperties.cpp
lib/
gui_rpc_client.h
gui_rpc_client_ops.cpp
sched/
plan_class_spec.cpp
sched_send.cpp
sched_types.cpp
Rom 8 Dec 2012
- client/server: Make sure the GPU Type field is really classified as an Intel
GPU.
lib/
coproc.cpp
David 8 Dec 2012
- client/manager: tweaks to Intel GPU code
client/
acct_mgr.cpp

client/Makefile.am

@ -62,6 +62,7 @@ boinc_client_SOURCES = \
file_xfer.cpp \
gpu_amd.cpp \
gpu_detect.cpp \
gpu_intel.cpp \
gpu_nvidia.cpp \
gpu_opencl.cpp \
gui_http.cpp \

client/client_state.cpp

@ -372,7 +372,7 @@ int CLIENT_STATE::init() {
vector<string> warnings;
coprocs.get(
config.use_all_gpus, descs, warnings,
- config.ignore_nvidia_dev, config.ignore_ati_dev, config.ignore_intel_gpu_dev
+ config.ignore_nvidia_dev, config.ignore_ati_dev, config.ignore_intel_dev
);
for (i=0; i<descs.size(); i++) {
msg_printf(NULL, MSG_INFO, "%s", descs[i].c_str());
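Each entry of descs becomes one startup message; combining the "INTEL GPU %d: %s" prefix built in gpu_detect.cpp with COPROC_INTEL::description() (both later in this diff), a detected device would be logged roughly as follows (device name and numbers are illustrative):

    INTEL GPU 0: Intel(R) HD Graphics 4000 (version 1.2, 1024MB, 1024MB available, 147 GFLOPS peak)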

client/cpu_sched.cpp

@ -1347,7 +1347,7 @@ static inline void assign_coprocs(vector<RESULT*>& jobs) {
if (coprocs.have_ati()) {
copy_available_ram(coprocs.ati, GPU_TYPE_ATI);
}
- if (coprocs.have_intel_gpu()) {
+ if (coprocs.have_intel()) {
copy_available_ram(coprocs.intel_gpu, GPU_TYPE_INTEL);
}
#endif

client/gpu_detect.cpp

@ -64,6 +64,7 @@ void segv_handler(int) {
vector<COPROC_ATI> ati_gpus;
vector<COPROC_NVIDIA> nvidia_gpus;
vector<COPROC_INTEL> intel_gpus;
vector<OPENCL_DEVICE_PROP> ati_opencls;
vector<OPENCL_DEVICE_PROP> nvidia_opencls;
vector<OPENCL_DEVICE_PROP> intel_gpu_opencls;
@ -72,7 +73,7 @@ void COPROCS::get(
bool use_all, vector<string>&descs, vector<string>&warnings,
vector<int>& ignore_nvidia_dev,
vector<int>& ignore_ati_dev,
- vector<int>& ignore_intel_gpu_dev
+ vector<int>& ignore_intel_dev
) {
unsigned int i;
char buf[256], buf2[256];
@ -91,7 +92,13 @@ void COPROCS::get(
warnings.push_back("Caught SIGSEGV in ATI GPU detection");
}
try {
- get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_gpu_dev);
+ intel_gpu.get(use_all, warnings, ignore_intel_dev);
+ }
+ catch (...) {
+ warnings.push_back("Caught SIGSEGV in INTEL GPU detection");
+ }
+ try {
+ get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_dev);
}
catch (...) {
warnings.push_back("Caught SIGSEGV in OpenCL detection");
@ -110,10 +117,15 @@ void COPROCS::get(
ati.get(use_all, warnings, ignore_ati_dev);
}
#endif
+ if (setjmp(resume)) {
+ warnings.push_back("Caught SIGSEGV in INTEL GPU detection");
+ } else {
+ intel_gpu.get(use_all, warnings, ignore_intel_dev);
+ }
if (setjmp(resume)) {
warnings.push_back("Caught SIGSEGV in OpenCL detection");
} else {
- get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_gpu_dev);
+ get_opencl(use_all, warnings, ignore_ati_dev, ignore_nvidia_dev, ignore_intel_dev);
}
signal(SIGSEGV, old_sig);
#endif
@ -129,7 +141,11 @@ void COPROCS::get(
break;
case COPROC_UNUSED:
default:
- sprintf(buf2, "NVIDIA GPU %d (not used): %s", nvidia_gpus[i].device_num, buf);
+ if (nvidia_gpus[i].opencl_prop.is_used) {
+ sprintf(buf2, "NVIDIA GPU %d (OpenCL only): %s", nvidia_gpus[i].device_num, buf);
+ } else {
+ sprintf(buf2, "NVIDIA GPU %d (not used): %s", nvidia_gpus[i].device_num, buf);
+ }
break;
}
descs.push_back(string(buf2));
@ -146,7 +162,32 @@ void COPROCS::get(
break;
case COPROC_UNUSED:
default:
- sprintf(buf2, "ATI GPU %d: (not used) %s", ati_gpus[i].device_num, buf);
+ if (ati_gpus[i].opencl_prop.is_used) {
+ sprintf(buf2, "ATI GPU %d: (OpenCL only) %s", ati_gpus[i].device_num, buf);
+ } else {
+ sprintf(buf2, "ATI GPU %d: (not used) %s", ati_gpus[i].device_num, buf);
+ }
break;
}
descs.push_back(string(buf2));
}
for (i=0; i<intel_gpus.size(); i++) {
intel_gpus[i].description(buf);
switch(intel_gpus[i].is_used) {
case COPROC_IGNORED:
sprintf(buf2, "INTEL GPU %d (ignored by config): %s", intel_gpus[i].device_num, buf);
break;
case COPROC_USED:
sprintf(buf2, "INTEL GPU %d: %s", intel_gpus[i].device_num, buf);
break;
case COPROC_UNUSED:
default:
if (intel_gpus[i].opencl_prop.is_used) {
sprintf(buf2, "INTEL GPU %d: (OpenCL only) %s", intel_gpus[i].device_num, buf);
} else {
sprintf(buf2, "INTEL GPU %d: (not used) %s", intel_gpus[i].device_num, buf);
}
break;
}
descs.push_back(string(buf2));
@ -175,6 +216,7 @@ void COPROCS::get(
ati_gpus.clear();
nvidia_gpus.clear();
intel_gpus.clear();
ati_opencls.clear();
nvidia_opencls.clear();
intel_gpu_opencls.clear();
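The detection calls above are wrapped either in try/catch or in a signal(SIGSEGV)/setjmp guard, depending on the platform, so a crash inside a vendor GPU library is reported as a warning instead of taking down the whole client. A minimal, self-contained sketch of the setjmp variant, with a hypothetical possibly_crashy_detection() standing in for the vendor call:

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static jmp_buf resume;

    static void segv_handler(int) {
        longjmp(resume, 1);          // unwind back to the setjmp() below
    }

    static void possibly_crashy_detection() {
        // stand-in for a call into a vendor driver that may segfault
    }

    int main() {
        void (*old_sig)(int) = signal(SIGSEGV, segv_handler);
        if (setjmp(resume)) {
            printf("Caught SIGSEGV in GPU detection\n");
        } else {
            possibly_crashy_detection();
        }
        signal(SIGSEGV, old_sig);    // restore the previous handler
        return 0;
    }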

client/gpu_detect.h

@ -17,6 +17,7 @@
extern vector<COPROC_ATI> ati_gpus;
extern vector<COPROC_NVIDIA> nvidia_gpus;
extern vector<COPROC_INTEL> intel_gpus;
extern vector<OPENCL_DEVICE_PROP> nvidia_opencls;
extern vector<OPENCL_DEVICE_PROP> ati_opencls;
extern vector<OPENCL_DEVICE_PROP> intel_gpu_opencls;

client/gpu_intel.cpp (new file, 50 lines)

@ -0,0 +1,50 @@
// This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2012 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
// Detection of Intel GPUs
//
#ifdef _WIN32
#include "boinc_win.h"
#else
#ifdef __APPLE__
// Suppress obsolete warning when building for OS 10.3.9
#define DLOPEN_NO_WARN
#include <mach-o/dyld.h>
#endif
#include "config.h"
#include <dlfcn.h>
#endif
#include <vector>
#include <string>
using std::vector;
using std::string;
#include "coproc.h"
#include "util.h"
#include "client_msgs.h"
#include "gpu_detect.h"
void COPROC_INTEL::get(
bool,
vector<string>&,
vector<int>&
) {
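// Stub: per the note in gpu_opencl.cpp later in this diff, Intel GPUs are
// currently detected through OpenCL only, so there is no vendor-specific
// detection to perform here yet.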
}

client/gpu_opencl.cpp

@ -115,7 +115,7 @@ void COPROCS::get_opencl(
vector<string>& warnings,
vector<int>& ignore_ati_dev,
vector<int>& ignore_nvidia_dev,
- vector<int>& ignore_intel_gpu_dev
+ vector<int>& ignore_intel_dev
) {
cl_int ciErrNum;
cl_platform_id platforms[MAX_OPENCL_PLATFORMS];
@ -337,11 +337,21 @@ void COPROCS::get_opencl(
COPROC_INTEL c;
c.opencl_prop = prop;
c.is_used = COPROC_UNUSED;
c.available_ram = prop.global_mem_size;
strcpy(c.name, prop.name);
strcpy(c.version, prop.opencl_driver_version);
c.set_peak_flops();
prop.peak_flops = c.peak_flops;
prop.opencl_available_ram = prop.global_mem_size;
intel_gpu_opencls.push_back(prop);
// At present Intel GPUs only support OpenCL and do not have a native
// GPGPU framework, so treat each detected Intel OpenCL GPU device as
// a native device.
intel_gpus.push_back(c);
}
}
}
@ -388,7 +398,7 @@ void COPROCS::get_opencl(
strcpy(ati.name, ati.opencl_prop.name);
}
- intel_gpu.find_best_opencls(use_all, intel_gpu_opencls, ignore_intel_gpu_dev);
+ intel_gpu.find_best_opencls(use_all, intel_gpu_opencls, ignore_intel_dev);
intel_gpu.available_ram = intel_gpu.opencl_prop.global_mem_size;
strcpy(intel_gpu.name, intel_gpu.opencl_prop.name);
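The opencl_prop fields used here (name, global_mem_size, max_compute_units, max_clock_frequency) are obtained from standard clGetDeviceInfo queries during OpenCL detection. A minimal sketch of such a query, independent of BOINC's own wrappers (error handling omitted):

    #include <CL/cl.h>
    #include <stdio.h>

    // Print a few properties of one OpenCL device.
    void query_device(cl_device_id dev) {
        char name[256];
        cl_ulong mem = 0;
        cl_uint units = 0, mhz = 0;
        clGetDeviceInfo(dev, CL_DEVICE_NAME, sizeof(name), name, NULL);
        clGetDeviceInfo(dev, CL_DEVICE_GLOBAL_MEM_SIZE, sizeof(mem), &mem, NULL);
        clGetDeviceInfo(dev, CL_DEVICE_MAX_COMPUTE_UNITS, sizeof(units), &units, NULL);
        clGetDeviceInfo(dev, CL_DEVICE_MAX_CLOCK_FREQUENCY, sizeof(mhz), &mhz, NULL);
        printf("%s: %llu bytes, %u compute units @ %u MHz\n",
            name, (unsigned long long)mem, (unsigned)units, (unsigned)mhz);
    }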

client/log_flags.cpp

@ -191,7 +191,7 @@ void CONFIG::show() {
}
show_gpu_ignore(ignore_nvidia_dev, GPU_TYPE_NVIDIA);
show_gpu_ignore(ignore_ati_dev, GPU_TYPE_ATI);
- show_gpu_ignore(ignore_intel_gpu_dev, GPU_TYPE_ATI);
+ show_gpu_ignore(ignore_intel_dev, GPU_TYPE_ATI);
for (i=0; i<exclude_gpus.size(); i++) {
show_exclude_gpu(exclude_gpus[i]);
}
@ -261,7 +261,7 @@ int CONFIG::parse_options_client(XML_PARSER& xp) {
exclusive_gpu_apps.clear();
ignore_nvidia_dev.clear();
ignore_ati_dev.clear();
- ignore_intel_gpu_dev.clear();
+ ignore_intel_dev.clear();
while (!xp.get_tag()) {
if (!xp.is_tag) {
@ -365,8 +365,8 @@ int CONFIG::parse_options_client(XML_PARSER& xp) {
ignore_ati_dev.push_back(n);
continue;
}
- if (xp.parse_int("ignore_intel_gpu_dev", n)) {
- ignore_intel_gpu_dev.push_back(n);
+ if (xp.parse_int("ignore_intel_dev", n)) {
+ ignore_intel_dev.push_back(n);
continue;
}
if (xp.parse_int("max_file_xfers", max_file_xfers)) continue;

clientgui/AdvancedFrame.cpp

@ -470,7 +470,7 @@ bool CAdvancedFrame::CreateMenu() {
_("Stop work regardless of preferences")
);
- if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+ if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
#ifndef __WXGTK__
menuActivity->AppendSeparator();
@ -1822,7 +1822,7 @@ void CAdvancedFrame::OnFrameRender(wxTimerEvent& WXUNUSED(event)) {
CC_STATUS status;
if ((pDoc->IsConnected()) && (0 == pDoc->GetCoreClientStatus(status))) {
UpdateActivityModeControls(status);
- if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+ if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
UpdateGPUModeControls(status);
}
UpdateNetworkModeControls(status);

clientgui/AsyncRPC.cpp

@ -1006,7 +1006,7 @@ void CMainDocument::HandleCompletedRPC() {
exchangeBuf->executing_as_daemon = arg1->executing_as_daemon;
exchangeBuf->have_nvidia = arg1->have_nvidia;
exchangeBuf->have_ati = arg1->have_ati;
- exchangeBuf->have_intel_gpu = arg1->have_intel_gpu;
+ exchangeBuf->have_intel = arg1->have_intel;
}
break;
case RPC_GET_RESULTS:

clientgui/BOINCTaskBar.cpp

@ -523,7 +523,7 @@ wxMenu *CTaskBarIcon::BuildContextMenu() {
pMenu->AppendSeparator();
m_SnoozeMenuItem = pMenu->AppendCheckItem(ID_TB_SUSPEND, _("Snooze"), wxEmptyString);
- if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+ if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
m_SnoozeGPUMenuItem = pMenu->AppendCheckItem(ID_TB_SUSPEND_GPU, _("Snooze GPU"), wxEmptyString);
}
@ -633,7 +633,7 @@ void CTaskBarIcon::AdjustMenuItems(wxMenu* pMenu) {
}
}
- if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+ if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
switch (status.gpu_mode) {
case RUN_MODE_NEVER:
switch (status.gpu_mode_perm) {
@ -660,7 +660,7 @@ void CTaskBarIcon::AdjustMenuItems(wxMenu* pMenu) {
}
break;
}
- if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu) {
+ if (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel) {
if (status.task_mode == RUN_MODE_NEVER) {
m_SnoozeGPUMenuItem->Check(false);
m_SnoozeGPUMenuItem->Enable(false);
@ -724,7 +724,7 @@ void CTaskBarIcon::UpdateTaskbarStatus() {
}
strMessage += wxT(".\n");
- if (!comp_suspended && (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel_gpu)) {
+ if (!comp_suspended && (pDoc->state.have_nvidia || pDoc->state.have_ati || pDoc->state.have_intel)) {
switch(status.gpu_suspend_reason) {
case 0:
strMessage += _("GPU computing is enabled");

clientgui/DlgItemProperties.cpp

@ -280,7 +280,7 @@ void CDlgItemProperties::renderInfos(PROJECT* project_in) {
project->rsc_desc_ati
);
}
- if (pDoc->state.have_intel_gpu) {
+ if (pDoc->state.have_intel) {
show_rsc(
wxString(proc_type_name(PROC_TYPE_INTEL_GPU), wxConvUTF8),
project->rsc_desc_intel_gpu

lib/cc_config.cpp

@ -218,7 +218,7 @@ void CONFIG::defaults() {
http_transfer_timeout_bps = 10;
ignore_nvidia_dev.clear();
ignore_ati_dev.clear();
- ignore_intel_gpu_dev.clear();
+ ignore_intel_dev.clear();
max_file_xfers = 8;
max_file_xfers_per_project = 2;
max_stderr_file_size = 0;
@ -288,7 +288,7 @@ int CONFIG::parse_options(XML_PARSER& xp) {
exclusive_gpu_apps.clear();
ignore_nvidia_dev.clear();
ignore_ati_dev.clear();
- ignore_intel_gpu_dev.clear();
+ ignore_intel_dev.clear();
exclude_gpus.clear();
while (!xp.get_tag()) {
@ -372,7 +372,7 @@ int CONFIG::parse_options(XML_PARSER& xp) {
continue;
}
if (xp.parse_int("ignore_intel_gpu_dev", n)) {
- ignore_intel_gpu_dev.push_back(n);
+ ignore_intel_dev.push_back(n);
continue;
}
if (xp.parse_int("max_file_xfers", max_file_xfers)) continue;
@ -558,10 +558,10 @@ int CONFIG::write(MIOFILE& out, LOG_FLAGS& log_flags) {
);
}
- for (i=0; i<ignore_intel_gpu_dev.size(); ++i) {
+ for (i=0; i<ignore_intel_dev.size(); ++i) {
out.printf(
" <ignore_intel_gpu_dev>%d</ignore_intel_gpu_dev>\n",
- ignore_intel_gpu_dev[i]
+ ignore_intel_dev[i]
);
}
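As the writer above shows, the on-disk tag keeps its old name even though the variable was renamed, so a user wanting to exclude an Intel GPU device would still put something like this in cc_config.xml (device number illustrative, assuming the usual <options> section):

    <cc_config>
        <options>
            <ignore_intel_gpu_dev>0</ignore_intel_gpu_dev>
        </options>
    </cc_config>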

lib/cc_config.h

@ -157,7 +157,7 @@ struct CONFIG {
int http_transfer_timeout;
std::vector<int> ignore_ati_dev;
std::vector<int> ignore_nvidia_dev;
- std::vector<int> ignore_intel_gpu_dev;
+ std::vector<int> ignore_intel_dev;
int max_file_xfers;
int max_file_xfers_per_project;
int max_stderr_file_size;

lib/coproc.cpp

@ -324,6 +324,12 @@ void COPROCS::summary_string(char* buf, int len) {
);
strlcat(buf, buf2, len);
}
if (intel_gpu.count) {
sprintf(buf2,"[INTEL|%s|%d|%fMB|%s]",
intel_gpu.name, intel_gpu.count, intel_gpu.opencl_prop.global_mem_size/MEGA, intel_gpu.version
);
strlcat(buf, buf2, len);
}
}
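With the format string above, a host with a single Intel GPU contributes a bracketed segment like the following to the coprocessor summary (name, memory size and driver version are illustrative; %f prints six decimal places):

    [INTEL|Intel(R) HD Graphics 4000|1|1024.000000MB|1.2]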
int COPROCS::parse(XML_PARSER& xp) {
@ -936,7 +942,7 @@ void COPROC_INTEL::write_xml(MIOFILE& f, bool scheduler_rpc) {
void COPROC_INTEL::clear() {
COPROC::clear();
- strcpy(type, proc_type_name_xml(PROC_TYPE_AMD_GPU));
+ strcpy(type, proc_type_name_xml(PROC_TYPE_INTEL_GPU));
estimated_delay = -1;
strcpy(name, "");
strcpy(version, "");
@ -976,6 +982,11 @@ int COPROC_INTEL::parse(XML_PARSER& xp) {
return ERR_XML_PARSE;
}
void COPROC_INTEL::description(char* buf) {
sprintf(buf, "%s (version %s, %.0fMB, %.0fMB available, %.0f GFLOPS peak)",
name, version, ((double)opencl_prop.global_mem_size)/MEGA, available_ram/MEGA, peak_flops/1.e9
);
}
// http://en.wikipedia.org/wiki/Comparison_of_Intel_graphics_processing_units says:
// The raw performance of integrated GPU, in single-precision FLOPS,
@ -990,12 +1001,12 @@ void COPROC_INTEL::set_peak_flops() {
if (opencl_prop.max_compute_units) {
x = opencl_prop.max_compute_units * 8 * opencl_prop.max_clock_frequency * 1e6;
}
peak_flops = (x>0)?x:45e9;
}
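A worked example of this estimate, with illustrative OpenCL values: a device reporting 16 compute units and a 1150 MHz maximum clock is rated at 16 * 8 * 1150e6 ≈ 1.47e11, i.e. about 147 GFLOPS peak; a device whose driver reports no compute-unit count falls back to the 45 GFLOPS default.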
//TODO: Fix this
void COPROC_INTEL::fake(double ram, double avail_ram, int n) {
- strcpy(type, proc_type_name_xml(PROC_TYPE_AMD_GPU));
+ strcpy(type, proc_type_name_xml(PROC_TYPE_INTEL_GPU));
strcpy(version, "1.4.3");
strcpy(name, "foobar");
count = n;
@ -1013,7 +1024,7 @@ const char* proc_type_name_xml(int pt) {
case PROC_TYPE_CPU: return "CPU";
case PROC_TYPE_NVIDIA_GPU: return "NVIDIA";
case PROC_TYPE_AMD_GPU: return "ATI";
- case PROC_TYPE_INTEL_GPU: return "intel_gpu";
+ case PROC_TYPE_INTEL_GPU: return "INTEL";
}
return "unknown";
}

lib/coproc.h

@ -166,7 +166,7 @@ struct OPENCL_DEVICE_PROP {
void write_xml(MIOFILE&);
#endif
int parse(XML_PARSER&);
void description(char* buf, const char* type);
};
@ -378,6 +378,7 @@ struct COPROC_ATI : public COPROC {
struct COPROC_INTEL : public COPROC {
char name[256];
char version[50];
double global_mem_size;
COPROC_USAGE is_used; // temp used in scan process
#ifndef _USING_FCGI_
@ -386,6 +387,12 @@ struct COPROC_INTEL : public COPROC {
COPROC_INTEL(): COPROC() {
strcpy(type, proc_type_name_xml(PROC_TYPE_INTEL_GPU));
}
void get(
bool use_all,
std::vector<std::string>&,
std::vector<int>& ignore_devs
);
void description(char*);
void clear();
int parse(XML_PARSER&);
void set_peak_flops();
@ -446,6 +453,7 @@ struct COPROCS {
}
nvidia.clear();
ati.clear();
intel_gpu.clear();
COPROC c;
strcpy(c.type, "CPU");
add(c);

lib/gui_rpc_client.h

@ -389,7 +389,7 @@ struct CC_STATE {
TIME_STATS time_stats;
bool have_nvidia; // deprecated; include for compat (set by <have_cuda/>)
bool have_ati; // deprecated; include for compat
- bool have_intel_gpu;
+ bool have_intel;
CC_STATE();
~CC_STATE();

lib/gui_rpc_client_ops.cpp

@ -982,7 +982,7 @@ int CC_STATE::parse(XML_PARSER& xp) {
}
if (xp.parse_bool("have_cuda", have_nvidia)) continue;
if (xp.parse_bool("have_ati", have_ati)) continue;
- if (xp.parse_bool("have_intel_gpu", have_intel_gpu)) continue;
+ if (xp.parse_bool("have_intel", have_intel)) continue;
}
return 0;
}
@ -1014,7 +1014,7 @@ void CC_STATE::clear() {
host_info.clear_host_info();
have_nvidia = false;
have_ati = false;
- have_intel_gpu = false;
+ have_intel = false;
}
PROJECT* CC_STATE::lookup_project(const char* url) {

sched/plan_class_spec.cpp

@ -494,7 +494,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
} else if (!strcmp(gpu_type, "nvidia")) {
hu.proc_type = PROC_TYPE_NVIDIA_GPU;
hu.gpu_usage = gpu_usage;
- } else if (!strcmp(gpu_type, "intel_gpu")) {
+ } else if (!strcmp(gpu_type, "intel")) {
hu.proc_type = PROC_TYPE_INTEL_GPU;
hu.gpu_usage = gpu_usage;
}
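On the scheduler side this lets a plan class defined in plan_class_spec.xml target Intel GPUs. A rough, hypothetical entry, assuming the spec file's tags follow the field names used by the parser (class name and usage value are illustrative):

    <plan_class>
        <name>opencl_intel_gpu</name>
        <gpu_type>intel</gpu_type>
        <gpu_usage>1</gpu_usage>
    </plan_class>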

sched/sched_types.cpp

@ -98,7 +98,7 @@ int CLIENT_APP_VERSION::parse(XML_PARSER& xp) {
host_usage.proc_type = PROC_TYPE_NVIDIA_GPU;
} else if (!strcmp(coproc_req.type, "ATI")) {
host_usage.proc_type = PROC_TYPE_AMD_GPU;
- } else if (!strcmp(coproc_req.type, "INTEL_GPU")) {
+ } else if (!strcmp(coproc_req.type, "INTEL")) {
host_usage.proc_type = PROC_TYPE_INTEL_GPU;
}
}

Windows Visual Studio project file (.vcproj)

@ -691,6 +691,10 @@
RelativePath="..\client\gpu_detect.cpp"
>
</File>
<File
RelativePath="..\client\gpu_intel.cpp"
>
</File>
<File
RelativePath="..\client\gpu_nvidia.cpp"
>