- client: if an app has avg_ncpus < 1, run it at above-idle priority

even if it doesn't use a coprocessor.
- scheduler: added an "nci" (non CPU intensive) plan class
    to sched_plan.cpp.  It declares the use of 1% of a CPU.

The above two changes are intended to allow the QCN app to
run at above_idle priority, which it needs in order to do 500Hz polling.

- API: the std::string version of boinc_resolve_filename()
    acts the same as the char[] version.

svn path=/trunk/boinc/; revision=16985
This commit is contained in:
David Anderson 2009-01-22 19:51:04 +00:00
parent dd074868f6
commit 268b694932
6 changed files with 45 additions and 15 deletions

View File

@ -557,3 +557,24 @@ Charlie Jan 22 2009
clientgui/
BOINCClientManager.cpp
David 22 Jan 2009
- client: if an app has avg_ncpus < 1, run it at above-idle priority
even if it doesn't use a coprocessor.
- scheduler: added an "nci" (non CPU intensive) plan class
to sched_plan.cpp. It declares the use of 1% of a CPU.
The above two changes are intended to allow the QCN app to
run at above_idle priority, which it needs in order to do 500Hz polling.
- API: the std::string version of boinc_resolve_filename()
acts the same as the char[] version.
client/
app.cpp
app_start.cpp
lib/
app_ipc.cpp
gui_rpc_client.cpp
sched/
sched_plan.cpp

View File

@ -532,6 +532,7 @@ int ACTIVE_TASK::parse(MIOFILE& fin) {
int n, dummy;
unsigned int i;
PROJECT* project;
double x;
strcpy(result_name, "");
strcpy(project_master_url, "");
@ -622,6 +623,7 @@ int ACTIVE_TASK::parse(MIOFILE& fin) {
else if (parse_double(buf, "<working_set_size>", procinfo.working_set_size)) continue;
else if (parse_double(buf, "<working_set_size_smoothed>", procinfo.working_set_size_smoothed)) continue;
else if (parse_double(buf, "<page_fault_rate>", procinfo.page_fault_rate)) continue;
else if (parse_double(buf, "<current_cpu_time>", x)) continue;
else {
if (log_flags.unparsed_xml) {
msg_printf(0, MSG_INFO,

View File

@ -366,9 +366,9 @@ int ACTIVE_TASK::start(bool first_time) {
int retval;
bool coprocs_reserved = false;
// if this job uses a GPU and not much CPU, run it at normal priority
// if this job uses less than one CPU, run it at above idle priority
//
bool high_priority = result->uses_coprocs() && (app_version->avg_ncpus < 1);
bool high_priority = (app_version->avg_ncpus < 1);
if (first_time && log_flags.task) {
msg_printf(result->project, MSG_INFO,

View File

@ -356,7 +356,7 @@ int boinc_resolve_filename_s(const char *virtual_name, string& physical_name) {
}
#endif
FILE *fp = boinc_fopen(virtual_name, "r");
if (!fp) return ERR_FOPEN;
if (!fp) return 0;
buf[0] = 0;
p = fgets(buf, 512, fp);
fclose(fp);

View File

@ -129,20 +129,19 @@ int RPC_CLIENT::init_asynch(
}
retval = boinc_socket(sock);
BOINCTRACE("RPC_CLIENT::init boinc_socket returned %d\n", sock);
BOINCTRACE("init_asynch() boinc_socket: %d\n", sock);
if (retval) return retval;
retval = boinc_socket_asynch(sock, true);
if (retval) {
BOINCTRACE("RPC_CLIENT::init asynch error: %d\n", retval);
BOINCTRACE("init_asynch() boinc_socket_asynch: %d\n", retval);
}
start_time = dtime();
retval = connect(sock, (const sockaddr*)(&addr), sizeof(addr));
if (retval) {
perror("connect");
BOINCTRACE("RPC_CLIENT::init connect returned %d\n", retval);
perror("init_asynch(): connect");
BOINCTRACE("init_asynch() connect: %d\n", retval);
}
BOINCTRACE("RPC_CLIENT::init attempting connect \n");
return 0;
}
@ -159,7 +158,7 @@ int RPC_CLIENT::init_poll() {
FD_SET(sock, &write_fds);
FD_SET(sock, &error_fds);
BOINCTRACE("RPC_CLIENT::init_poll sock = %d\n", sock);
BOINCTRACE("init_poll(): sock = %d\n", sock);
tv.tv_sec = tv.tv_usec = 0;
select(FD_SETSIZE, &read_fds, &write_fds, &error_fds, &tv);
@ -169,19 +168,19 @@ int RPC_CLIENT::init_poll() {
} else if (FD_ISSET(sock, &write_fds)) {
retval = get_socket_error(sock);
if (!retval) {
BOINCTRACE("RPC_CLIENT::init_poll connected to port %d\n", ntohs(addr.sin_port));
BOINCTRACE("init_poll(): connected to port %d\n", ntohs(addr.sin_port));
retval = boinc_socket_asynch(sock, false);
if (retval) {
BOINCTRACE("asynch error: %d\n", retval);
BOINCTRACE("init_poll(): boinc_socket_asynch: %d\n", retval);
return retval;
}
return 0;
} else {
BOINCTRACE("init_poll: get_socket_error(): %d\n", retval);
BOINCTRACE("init_poll(): get_socket_error(): %d\n", retval);
}
}
if (dtime() > start_time + timeout) {
BOINCTRACE("RPC_CLIENT init timed out\n");
BOINCTRACE("asynch init timed out\n");
return ERR_CONNECT;
}
if (retval) {
@ -190,7 +189,7 @@ int RPC_CLIENT::init_poll() {
retval = boinc_socket(sock);
retval = boinc_socket_asynch(sock, true);
retval = connect(sock, (const sockaddr*)(&addr), sizeof(addr));
BOINCTRACE("RPC_CLIENT::init_poll attempting connect\n");
BOINCTRACE("init_poll(): retrying connect: %d\n", retval);
return ERR_RETRY;
} else {
return ERR_CONNECT;

View File

@ -47,7 +47,7 @@ int app_plan(SCHEDULER_REQUEST& sreq, char* plan_class, HOST_USAGE& hu) {
// the following is for an app that can use anywhere
// from 1 to 64 threads, can control this exactly,
// and whose speedup is .95N
// (so on a uniprocessor, we'll use a sequential app if one is available)
// (on a uniprocessor, we'll use a sequential app if one is available)
//
int ncpus, nthreads;
bool bounded;
@ -140,6 +140,14 @@ int app_plan(SCHEDULER_REQUEST& sreq, char* plan_class, HOST_USAGE& hu) {
);
}
return 0;
} else if (!strcmp(plan_class, "nci")) {
// The following is for a non-CPU-intensive application.
// Say that we'll use 1% of a CPU.
// This will cause the client (6.7+) to run it at non-idle priority
//
hu.avg_ncpus = .01;
hu.max_ncpus = .01;
hu.flops = .01*sreq.host.p_fpops;
}
log_messages.printf(MSG_CRITICAL,
"Unknown plan class: %s\n", plan_class