mirror of https://github.com/BOINC/boinc.git
- client: remove "device" entry from CUDA_DEVICE_PROP,
    and change types of mem-size fields from int to double.
    These fields are size_t in NVIDIA's version of this;
    however, cuDeviceGetAttribute() returns them as int,
    so I don't see where this makes any difference.
- client: fix bug in handling of <no_rsc_apps> element.
- scheduler: message tweaks.
    Note: [foo] means that the message is enabled by <debug_foo>.

svn path=/trunk/boinc/; revision=25849
parent 0bb1fd6581
commit 68f9880615
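Background for the client-side change (not part of the commit): cuDeviceGetAttribute() reports every attribute as an int, even for quantities that are size_t in NVIDIA's own cudaDeviceProp, so the client reads each value into an int temporary and stores it in a double field. A minimal sketch of that pattern, assuming only the CUDA driver API from cuda.h; MemProps is a hypothetical stand-in for CUDA_DEVICE_PROP:

    // Sketch only: read int attributes via the CUDA driver API and widen them
    // into double fields, mirroring the itemp pattern in COPROC_NVIDIA::get().
    // Build with: g++ sketch.cpp -lcuda
    #include <cuda.h>
    #include <cstdio>

    struct MemProps {               // hypothetical, not BOINC's struct
        double totalGlobalMem;      // bytes
        double sharedMemPerBlock;   // bytes
        double totalConstMem;       // bytes
    };

    int main() {
        if (cuInit(0) != CUDA_SUCCESS) {
            fprintf(stderr, "cuInit() failed\n");
            return 1;
        }
        CUdevice dev;
        if (cuDeviceGet(&dev, 0) != CUDA_SUCCESS) {
            fprintf(stderr, "no CUDA device\n");
            return 1;
        }

        MemProps p;
        size_t global_mem;
        int itemp;

        // total memory is reported as size_t by cuDeviceTotalMem()
        cuDeviceTotalMem(&global_mem, dev);
        p.totalGlobalMem = (double)global_mem;

        // per-block sizes come back as int from cuDeviceGetAttribute(),
        // so read into an int temporary and then widen
        cuDeviceGetAttribute(&itemp, CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK, dev);
        p.sharedMemPerBlock = itemp;
        cuDeviceGetAttribute(&itemp, CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, dev);
        p.totalConstMem = itemp;

        printf("global %.0f, shared/block %.0f, const %.0f bytes\n",
            p.totalGlobalMem, p.sharedMemPerBlock, p.totalConstMem);
        return 0;
    }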
@@ -4727,3 +4727,29 @@ Charlie 5 July 2012
     mac_installer/
         release_boinc.sh
 
+David 5 July 2012
+    - client: remove "device" entry from CUDA_DEVICE_PROP,
+        and change types of mem-size fields from int to double.
+        These fields are size_t in NVIDIA's version of this;
+        however, cuDeviceGetAttribute() returns them as int,
+        so I don't see where this makes any difference.
+    - client: fix bug in handling of <no_rsc_apps> element.
+    - scheduler: message tweaks.
+        Note: [foo] means that the message is enabled by <debug_foo>.
+
+    lib/
+        coproc.cpp,h
+    client/
+        gpu_nvidia.cpp
+        work_fetch.cpp
+        scheduler_op.cpp
+    db/
+        boinc_db_types.h
+        boinc_db.h
+    vda/
+        sched_vda.cpp
+    sched/
+        plan_class_spec.cpp
+        sched_types.cpp
+
@@ -280,21 +280,20 @@ void COPROC_NVIDIA::get(
     sprintf(buf, "NVIDIA library reports %d GPU%s", cuda_ndevs, (cuda_ndevs==1)?"":"s");
     warnings.push_back(buf);
 
-    int j;
+    int j, itemp;
     unsigned int i;
     size_t global_mem;
     COPROC_NVIDIA cc;
     string s;
     for (j=0; j<cuda_ndevs; j++) {
         memset(&cc.prop, 0, sizeof(cc.prop));
-        int device;
+        CUdevice device;
         retval = (*__cuDeviceGet)(&device, j);
         if (retval) {
             sprintf(buf, "cuDeviceGet(%d) returned %d", j, retval);
             warnings.push_back(buf);
             return;
         }
         cc.prop.deviceHandle = device;
         (*__cuDeviceGetName)(cc.prop.name, 256, device);
         if (retval) {
             sprintf(buf, "cuDeviceGetName(%d) returned %d", j, retval);
@@ -304,10 +303,12 @@ void COPROC_NVIDIA::get(
         (*__cuDeviceComputeCapability)(&cc.prop.major, &cc.prop.minor, device);
         (*__cuDeviceTotalMem)(&global_mem, device);
         cc.prop.totalGlobalMem = (double) global_mem;
-        (*__cuDeviceGetAttribute)(&cc.prop.sharedMemPerBlock, CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK, device);
+        (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK, device);
+        cc.prop.sharedMemPerBlock = itemp;
         (*__cuDeviceGetAttribute)(&cc.prop.regsPerBlock, CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK, device);
         (*__cuDeviceGetAttribute)(&cc.prop.warpSize, CU_DEVICE_ATTRIBUTE_WARP_SIZE, device);
-        (*__cuDeviceGetAttribute)(&cc.prop.memPitch, CU_DEVICE_ATTRIBUTE_MAX_PITCH, device);
+        (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_MAX_PITCH, device);
+        cc.prop.memPitch = itemp;
         retval = (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsPerBlock, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, device);
         retval = (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsDim[0], CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X, device);
         (*__cuDeviceGetAttribute)(&cc.prop.maxThreadsDim[1], CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y, device);
@@ -316,8 +317,10 @@ void COPROC_NVIDIA::get(
         (*__cuDeviceGetAttribute)(&cc.prop.maxGridSize[1], CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y, device);
         (*__cuDeviceGetAttribute)(&cc.prop.maxGridSize[2], CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z, device);
         (*__cuDeviceGetAttribute)(&cc.prop.clockRate, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device);
-        (*__cuDeviceGetAttribute)(&cc.prop.totalConstMem, CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, device);
-        (*__cuDeviceGetAttribute)(&cc.prop.textureAlignment, CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT, device);
+        (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY, device);
+        cc.prop.totalConstMem = itemp;
+        (*__cuDeviceGetAttribute)(&itemp, CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT, device);
+        cc.prop.textureAlignment = itemp;
         (*__cuDeviceGetAttribute)(&cc.prop.deviceOverlap, CU_DEVICE_ATTRIBUTE_GPU_OVERLAP, device);
         (*__cuDeviceGetAttribute)(&cc.prop.multiProcessorCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device);
         (*__cuDeviceGetAttribute)(&cc.pci_info.bus_id, CU_DEVICE_ATTRIBUTE_PCI_BUS_ID, device);
@@ -866,7 +866,7 @@ int SCHEDULER_REPLY::parse(FILE* in, PROJECT* project) {
             continue;
         } else if (xp.parse_str("no_rsc_apps", buf, sizeof(buf))) {
             if (!project->anonymous_platform) {
-                handle_no_rsc_apps(buf, project, btemp);
+                handle_no_rsc_apps(buf, project, true);
             }
             continue;
         } else if (xp.parse_bool("verify_files_on_app_start", project->verify_files_on_app_start)) {
@@ -997,4 +997,3 @@ void CLIENT_STATE::generate_new_host_cpid() {
     }
 }
-}
 
@@ -481,43 +481,6 @@ public:
     std::vector<DB_FILESET_SCHED_TRIGGER_ITEM> items;
 };
 
-struct VDA_FILE {
-    int id;
-    double create_time;
-    char dir[256];
-    char name[256];
-    double size;
-    bool need_update;
-    bool initialized;
-    bool retrieving;
-    void clear();
-};
-
-struct VDA_CHUNK_HOST {
-    double create_time;
-    int vda_file_id;
-    int host_id;
-    char name[256];     // C1.C2.Cn
-    double size;
-    bool present_on_host;
-    bool transfer_in_progress;
-    bool transfer_wait;
-    double transfer_request_time;
-        // when vdad assigned this chunk to this host
-    double transfer_send_time;
-        // when transfer request was sent to host
-
-    // the following not in DB
-    //
-    bool found;
-
-    void clear();
-    inline bool download_in_progress() {
-        return (transfer_in_progress && !present_on_host);
-    }
-
-};
-
 struct DB_VDA_FILE : public DB_BASE, public VDA_FILE {
     DB_VDA_FILE(DB_CONN* p=0);
     int get_id();
@@ -675,4 +675,41 @@ struct HOST_APP_VERSION {
     bool daily_quota_exceeded;
 };
 
+struct VDA_FILE {
+    int id;
+    double create_time;
+    char dir[256];
+    char name[256];
+    double size;
+    bool need_update;
+    bool initialized;
+    bool retrieving;
+    void clear();
+};
+
+struct VDA_CHUNK_HOST {
+    double create_time;
+    int vda_file_id;
+    int host_id;
+    char name[256];     // C1.C2.Cn
+    double size;
+    bool present_on_host;
+    bool transfer_in_progress;
+    bool transfer_wait;
+    double transfer_request_time;
+        // when vdad assigned this chunk to this host
+    double transfer_send_time;
+        // when transfer request was sent to host
+
+    // the following not in DB
+    //
+    bool found;
+
+    void clear();
+    inline bool download_in_progress() {
+        return (transfer_in_progress && !present_on_host);
+    }
+
+};
+
 #endif
@@ -425,36 +425,36 @@ void COPROC_NVIDIA::write_xml(MIOFILE& f, bool scheduler_rpc) {
        "   <cudaVersion>%d</cudaVersion>\n"
        "   <drvVersion>%d</drvVersion>\n"
        "   <totalGlobalMem>%f</totalGlobalMem>\n"
-       "   <sharedMemPerBlock>%u</sharedMemPerBlock>\n"
+       "   <sharedMemPerBlock>%f</sharedMemPerBlock>\n"
        "   <regsPerBlock>%d</regsPerBlock>\n"
        "   <warpSize>%d</warpSize>\n"
-       "   <memPitch>%u</memPitch>\n"
+       "   <memPitch>%f</memPitch>\n"
        "   <maxThreadsPerBlock>%d</maxThreadsPerBlock>\n"
        "   <maxThreadsDim>%d %d %d</maxThreadsDim>\n"
        "   <maxGridSize>%d %d %d</maxGridSize>\n"
        "   <clockRate>%d</clockRate>\n"
-       "   <totalConstMem>%u</totalConstMem>\n"
+       "   <totalConstMem>%f</totalConstMem>\n"
        "   <major>%d</major>\n"
        "   <minor>%d</minor>\n"
-       "   <textureAlignment>%u</textureAlignment>\n"
+       "   <textureAlignment>%f</textureAlignment>\n"
        "   <deviceOverlap>%d</deviceOverlap>\n"
        "   <multiProcessorCount>%d</multiProcessorCount>\n",
        peak_flops,
        cuda_version,
        display_driver_version,
        prop.totalGlobalMem,
-       (unsigned int)prop.sharedMemPerBlock,
+       prop.sharedMemPerBlock,
        prop.regsPerBlock,
        prop.warpSize,
-       (unsigned int)prop.memPitch,
+       prop.memPitch,
        prop.maxThreadsPerBlock,
        prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2],
        prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2],
        prop.clockRate,
-       (unsigned int)prop.totalConstMem,
+       prop.totalConstMem,
        prop.major,
        prop.minor,
-       (unsigned int)prop.textureAlignment,
+       prop.textureAlignment,
        prop.deviceOverlap,
        prop.multiProcessorCount
    );
@@ -480,7 +480,6 @@ void COPROC_NVIDIA::clear() {
    cuda_version = 0;
    display_driver_version = 0;
    strcpy(prop.name, "");
    prop.deviceHandle = 0;
    prop.totalGlobalMem = 0;
    prop.sharedMemPerBlock = 0;
    prop.regsPerBlock = 0;
@@ -529,12 +528,11 @@ int COPROC_NVIDIA::parse(XML_PARSER& xp) {
        if (xp.parse_int("cudaVersion", cuda_version)) continue;
        if (xp.parse_int("drvVersion", display_driver_version)) continue;
        if (xp.parse_str("name", prop.name, sizeof(prop.name))) continue;
        if (xp.parse_int("deviceHandle", prop.deviceHandle)) continue;
        if (xp.parse_double("totalGlobalMem", prop.totalGlobalMem)) continue;
-       if (xp.parse_int("sharedMemPerBlock", (int&)prop.sharedMemPerBlock)) continue;
+       if (xp.parse_double("sharedMemPerBlock", prop.sharedMemPerBlock)) continue;
        if (xp.parse_int("regsPerBlock", prop.regsPerBlock)) continue;
        if (xp.parse_int("warpSize", prop.warpSize)) continue;
-       if (xp.parse_int("memPitch", (int&)prop.memPitch)) continue;
+       if (xp.parse_double("memPitch", prop.memPitch)) continue;
        if (xp.parse_int("maxThreadsPerBlock", prop.maxThreadsPerBlock)) continue;
        if (xp.parse_str("maxThreadsDim", buf2, sizeof(buf2))) {
            // can't use sscanf here (FCGI)
@@ -567,10 +565,10 @@ int COPROC_NVIDIA::parse(XML_PARSER& xp) {
            continue;
        }
        if (xp.parse_int("clockRate", prop.clockRate)) continue;
-       if (xp.parse_int("totalConstMem", (int&)prop.totalConstMem)) continue;
+       if (xp.parse_double("totalConstMem", prop.totalConstMem)) continue;
        if (xp.parse_int("major", prop.major)) continue;
        if (xp.parse_int("minor", prop.minor)) continue;
-       if (xp.parse_int("textureAlignment", (int&)prop.textureAlignment)) continue;
+       if (xp.parse_double("textureAlignment", prop.textureAlignment)) continue;
        if (xp.parse_int("deviceOverlap", prop.deviceOverlap)) continue;
        if (xp.parse_int("multiProcessorCount", prop.multiProcessorCount)) continue;
        if (xp.match_tag("pci_info")) {
@@ -892,9 +890,10 @@ void COPROC_ATI::fake(double ram, double avail_ram, int n) {
 
 const char* proc_type_name_xml(int pt) {
     switch(pt) {
     case PROC_TYPE_CPU: return "CPU";
     case PROC_TYPE_NVIDIA_GPU: return "CUDA";
     case PROC_TYPE_AMD_GPU: return "ATI";
-    case PROC_TYPE_INTEL_GPU: return "INTEL";
+    case PROC_TYPE_INTEL_GPU: return "intel_gpu";
     }
     return "unknown";
 }
lib/coproc.h (11 lines changed)
@@ -288,24 +288,25 @@ struct COPROC {
 //
 struct CUDA_DEVICE_PROP {
     char name[256];
     int deviceHandle;
     double totalGlobalMem;
-    int sharedMemPerBlock;
+    double sharedMemPerBlock;
     int regsPerBlock;
     int warpSize;
-    int memPitch;
+    double memPitch;
     int maxThreadsPerBlock;
     int maxThreadsDim[3];
     int maxGridSize[3];
     int clockRate;
-    int totalConstMem;
+    double totalConstMem;
     int major;     // compute capability
     int minor;
-    int textureAlignment;
+    double textureAlignment;
     int deviceOverlap;
     int multiProcessorCount;
 };
 
+typedef int CUdevice;
+
 struct COPROC_NVIDIA : public COPROC {
     int cuda_version;  // CUDA runtime version
     int display_driver_version;
@@ -72,7 +72,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
            if (!strstr(buf, cpu_features[i].c_str())) {
                if (config.debug_version_select) {
                    log_messages.printf(MSG_NORMAL,
-                       "[version] [plan_class_spec] CPU lacks feature '%s' (got '%s')\n",
+                       "[version] plan_class_spec: CPU lacks feature '%s' (got '%s')\n",
                        cpu_features[i].c_str(), sreq.host.p_features
                    );
                }
@@ -86,7 +86,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
    if (min_ncpus && g_wreq->effective_ncpus < min_ncpus) {
        if (config.debug_version_select) {
            log_messages.printf(MSG_NORMAL,
-               "[version] [plan_class_spec] not enough CPUs: %d < %f\n",
+               "[version] plan_class_spec: not enough CPUs: %d < %f\n",
                g_wreq->effective_ncpus, min_ncpus
            );
        }
@@ -98,7 +98,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
    if (have_os_regex && regexec(&(os_regex), sreq.host.os_version, 0, NULL, 0)) {
        if (config.debug_version_select) {
            log_messages.printf(MSG_NORMAL,
-               "[version] [plan_class_spec] OS version '%s' didn't match regexp\n",
+               "[version] plan_class_spec: OS version '%s' didn't match regexp\n",
                sreq.host.os_version
            );
        }
@@ -125,7 +125,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (n != 3) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] can't parse vbox version\n"
+                   "[version] plan_class_spec: can't parse vbox version\n"
                );
            }
            return false;
@@ -134,7 +134,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (min_vbox_version && v < min_vbox_version) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] vbox version too low: %d < %d\n",
+                   "[version] plan_class_spec: vbox version too low: %d < %d\n",
                    v, min_vbox_version
                );
            }
@@ -143,7 +143,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (max_vbox_version && v > max_vbox_version) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] vbox version too high: %d > %d\n",
+                   "[version] plan_class_spec: vbox version too high: %d > %d\n",
                    v, max_vbox_version
                );
            }
@@ -182,14 +182,14 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        bool p = parse_str(buf, tag, value, sizeof(value));
        if (config.debug_version_select) {
            log_messages.printf(MSG_NORMAL,
-               "[version] [plan_class_spec] parsed project prefs setting '%s' : %s\n",
+               "[version] plan_class_spec: parsed project prefs setting '%s' : %s\n",
                project_prefs_tag, p?"true":"false"
            );
        }
        if (regexec(&(project_prefs_regex), value, 0, NULL, 0)) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] project prefs setting '%s' prevents using plan class.\n",
+                   "[version] plan_class_spec: project prefs setting '%s' prevents using plan class.\n",
                    project_prefs_tag
                );
            }
@@ -212,7 +212,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        bool p = parse_double(buf, tag, v);
        if (config.debug_version_select) {
            log_messages.printf(MSG_NORMAL,
-               "[version] [plan_class_spec] parsed project prefs setting '%s' : %s : %f\n",
+               "[version] plan_class_spec: parsed project prefs setting '%s' : %s : %f\n",
                gpu_utilization_tag, p?"true":"false", v
            );
        }
@@ -230,7 +230,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (!cp.count) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] No ATI devices found\n"
+                   "[version] plan_class_spec: No AMD GPUs found\n"
                );
            }
            return false;
@@ -245,14 +245,18 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (need_ati_libs) {
            if (!cp.atirt_detected) {
                if (config.debug_version_select) {
-                   log_messages.printf(MSG_NORMAL,"[version] [plan_class_spec] ATI run time libraries not found\n");
+                   log_messages.printf(MSG_NORMAL,
+                       "[version] plan_class_spec: ATI libraries not found\n"
+                   );
                }
                return false;
            }
        } else {
            if (!cp.amdrt_detected) {
                if (config.debug_version_select) {
-                   log_messages.printf(MSG_NORMAL,"[version] [plan_class_spec] [plan_class_spec] AMD run time libraries not found\n");
+                   log_messages.printf(MSG_NORMAL,
+                       "[version] plan_class_spec: AMD libraries not found\n"
+                   );
                }
                return false;
            }
@@ -261,7 +265,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (min_cal_target && cp.attribs.target < min_cal_target) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] CAL target less than minimum (%d < %d)\n",
+                   "[version] plan_class_spec: CAL target less than minimum (%d < %d)\n",
                    cp.attribs.target, min_cal_target
                );
            }
@@ -270,7 +274,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (max_cal_target && cp.attribs.target > max_cal_target) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] CAL target greater than maximum (%d > %d)\n",
+                   "[version] plan_class_spec: CAL target greater than maximum (%d > %d)\n",
                    cp.attribs.target, max_cal_target
                );
            }
@@ -287,7 +291,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
            if (scanned != 3) {
                if (config.debug_version_select) {
                    log_messages.printf(MSG_NORMAL,
-                       "[version] [plan_class_spec] driver version '%s' couldn't be parsed\n",
+                       "[version] plan_class_spec: driver version '%s' couldn't be parsed\n",
                        cp.version
                    );
                }
@@ -298,7 +302,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        } else {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] no CAL, driver version couldn't be determined\n"
+                   "[version] plan_class_spec: no CAL, driver version couldn't be determined\n"
                );
            }
        }
@@ -312,7 +316,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (!cp.count) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] No NVIDIA devices found\n"
+                   "[version] plan_class_spec: No NVIDIA GPUs found\n"
                );
            }
            return false;
@@ -329,7 +333,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (min_nvidia_compcap && min_nvidia_compcap > v) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] NVIDIA compute capability required min: %d, supplied: %d\n",
+                   "[version] plan_class_spec: NVIDIA compute capability required min: %d, supplied: %d\n",
                    min_nvidia_compcap, v
                );
            }
@@ -338,7 +342,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (max_nvidia_compcap && max_nvidia_compcap < v) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] CUDA compute capability required max: %d, supplied: %d\n",
+                   "[version] plan_class_spec: CUDA compute capability required max: %d, supplied: %d\n",
                    max_nvidia_compcap, v
                );
            }
@@ -349,7 +353,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (min_cuda_version && min_cuda_version > cp.cuda_version) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] CUDA version required min: %d, supplied: %d\n",
+                   "[version] plan_class_spec: CUDA version required min: %d, supplied: %d\n",
                    min_cuda_version, cp.cuda_version
                );
            }
@@ -358,7 +362,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (max_cuda_version && max_cuda_version < cp.cuda_version) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] CUDA version required max: %d, supplied: %d\n",
+                   "[version] plan_class_spec: CUDA version required max: %d, supplied: %d\n",
                    max_cuda_version, cp.cuda_version
                );
            }
@@ -377,7 +381,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (!cp.count) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] No NVIDIA devices found\n"
+                   "[version] [version] No Intel GPUs found\n"
                );
            }
            return false;
@@ -392,7 +396,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (!cpp->have_opencl) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] GPU doesn't support OpenCL\n"
+                   "[version] GPU doesn't support OpenCL\n"
                );
            }
            return false;
@@ -403,7 +407,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (min_opencl_version && min_opencl_version > cpp->opencl_prop.opencl_device_version_int) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] OpenCL device version required min: %d, supplied: %d\n",
+                   "[version] OpenCL device version required min: %d, supplied: %d\n",
                    min_opencl_version, cpp->opencl_prop.opencl_device_version_int
                );
            }
@@ -421,7 +425,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
    if (min_gpu_ram_mb && min_gpu_ram_mb * MEGA > gpu_ram) {
        if (config.debug_version_select) {
            log_messages.printf(MSG_NORMAL,
-               "[version] [plan_class_spec] GPU RAM required min: %f, supplied: %f\n",
+               "[version] plan_class_spec: GPU RAM required min: %f, supplied: %f\n",
                min_gpu_ram_mb * MEGA, gpu_ram
            );
        }
@@ -433,7 +437,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (min_driver_version > driver_version) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] driver version required min: %d, supplied: %d\n",
+                   "[version] plan_class_spec: driver version required min: %d, supplied: %d\n",
                    abs(min_driver_version), driver_version
                );
            }
@@ -444,7 +448,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
        if (max_driver_version < driver_version) {
            if (config.debug_version_select) {
                log_messages.printf(MSG_NORMAL,
-                   "[version] [plan_class_spec] driver version required max: %d, supplied: %d\n",
+                   "[version] plan_class_spec: driver version required max: %d, supplied: %d\n",
                    abs(max_driver_version), driver_version
                );
            }
@@ -514,7 +518,7 @@ bool PLAN_CLASS_SPEC::check(SCHEDULER_REQUEST& sreq, HOST_USAGE& hu) {
 
    if (config.debug_version_select) {
        log_messages.printf(MSG_NORMAL,
-           "[version] [plan_class_spec] host_flops: %e, \tscale: %.2f, \tprojected_flops: %e, \tpeak_flops: %e\n",
+           "[version] plan_class_spec: host_flops: %e, \tscale: %.2f, \tprojected_flops: %e, \tpeak_flops: %e\n",
            sreq.host.p_fpops, projected_flops_scale, hu.projected_flops,
            hu.peak_flops
        );
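These scheduler messages follow the convention stated in the commit notes: the [version] prefix marks lines that are printed only when version-selection debugging is enabled in the project's config.xml. An illustrative fragment (not part of this commit), assuming the usual <debug_version_select> logging flag, and the kind of line it enables:

    <config>
        <debug_version_select/>
    </config>

    [version] plan_class_spec: not enough CPUs: 2 < 4.000000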
@@ -892,16 +892,25 @@ int SCHEDULER_REPLY::write(FILE* fout, SCHEDULER_REQUEST& sreq) {
        fprintf(fout, "%s", file_transfer_requests[i].c_str());
    }
 
-   fprintf(fout,
-       "<no_cpu_apps>%d</no_cpu_apps>\n"
-       "<no_cuda_apps>%d</no_cuda_apps>\n"
-       "<no_ati_apps>%d</no_ati_apps>\n"
-       "<no_intel_apps>%d</no_intel_apps>\n",
-       ssp->have_apps_for_proc_type[PROC_TYPE_CPU]?0:1,
-       ssp->have_apps_for_proc_type[PROC_TYPE_NVIDIA_GPU]?0:1,
-       ssp->have_apps_for_proc_type[PROC_TYPE_AMD_GPU]?0:1,
-       ssp->have_apps_for_proc_type[PROC_TYPE_INTEL_GPU]?0:1
-   );
+   if (g_request->core_client_version < 73000) {
+       fprintf(fout,
+           "<no_cpu_apps>%d</no_cpu_apps>\n"
+           "<no_cuda_apps>%d</no_cuda_apps>\n"
+           "<no_ati_apps>%d</no_ati_apps>\n",
+           ssp->have_apps_for_proc_type[PROC_TYPE_CPU]?0:1,
+           ssp->have_apps_for_proc_type[PROC_TYPE_NVIDIA_GPU]?0:1,
+           ssp->have_apps_for_proc_type[PROC_TYPE_AMD_GPU]?0:1
+       );
+   } else {
+       for (i=0; i<NPROC_TYPES; i++) {
+           if (!ssp->have_apps_for_proc_type[i]) {
+               fprintf(fout,
+                   "<no_rsc_apps>%s</no_rsc_apps>\n",
+                   proc_type_name_xml(i)
+               );
+           }
+       }
+   }
    gui_urls.get_gui_urls(user, host, team, buf);
    fputs(buf, fout);
    if (project_files.text) {
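With this change, clients new enough to understand it (per the core_client_version check above) receive one <no_rsc_apps> element for each processor type the project has no app versions for, named via proc_type_name_xml(); older clients still get the legacy no_cpu_apps/no_cuda_apps/no_ati_apps flags. An illustrative reply fragment (not from this commit), assuming a project with CPU and NVIDIA apps only:

    <no_rsc_apps>ATI</no_rsc_apps>
    <no_rsc_apps>intel_gpu</no_rsc_apps>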
@@ -153,13 +153,30 @@ static int process_completed_upload(char* chunk_name, CHUNK_LIST& chunks) {
     return 0;
 }
 
-// process a present file
+// Process a present file; possibilities:
+// - a download finished
+// - this host hasn't communicated in a while, and we deleted the
+//   VDA_CHUNK_HOST record
+// So:
+// - create a vda_chunk_host record if needed
+// - set present_on_host flag in vda_chunk_host
+// - mark our in-memory vda_chunk_host record as "found"
+// - mark vda_file for update
+//
 static int process_present_file(FILE_INFO& fi, CHUNK_LIST& chunks) {
     DB_VDA_CHUNK_HOST* ch;
     CHUNK_LIST::iterator cli = chunks.find(string(fi.name));
     if (cli == chunks.end()) {
         // don't have a record of this chunk on this host; make one
         //
         ch = new DB_VDA_CHUNK_HOST;
         ch->create_time = dtime();
 
     } else {
         ch = &(cli->second);
     }
     ch->transfer_in_progress = false;
     mark_for_update(ch->vda_file_id);
     return 0;
 }
@@ -327,22 +344,24 @@ void handle_vda() {
     }
 
     // process completed uploads
     // NOTE: completed downloads are handled below
     //
     for (i=0; i<g_request->file_xfer_results.size(); i++) {
         RESULT& r = g_request->file_xfer_results[i];
-        if (!starts_with(r.name, "vda_upload_")) continue;
-        char* chunk_name = r.name + strlen("vda_upload_");
-        if (config.debug_vda) {
-            log_messages.printf(MSG_NORMAL,
-                "[vda] DB: completed upload %s\n", chunk_name
-            );
-        }
-        retval = process_completed_upload(chunk_name, chunks);
-        if (retval) {
-            log_messages.printf(MSG_CRITICAL,
-                "[vda] process_completed_upload(): %d\n", retval
-            );
-            return;
+        if (strstr(r.name, "vda_upload")) {
+            char* chunk_name = r.name + strlen("vda_upload_");
+            if (config.debug_vda) {
+                log_messages.printf(MSG_NORMAL,
+                    "[vda] DB: completed upload %s\n", chunk_name
+                );
+            }
+            retval = process_completed_upload(chunk_name, chunks);
+            if (retval) {
+                log_messages.printf(MSG_CRITICAL,
+                    "[vda] process_completed_upload(): %d\n", retval
+                );
+                return;
+            }
         }
     }
 