mirror of https://github.com/BOINC/boinc.git
- user web: fixed bug in private-message code
- back end: moved HR_INFO to a separate file; did some debugging in HR code

    html/
        inc/
            forum_user.inc
            util.inc
        user/
            forum_pm.php
    sched/
        Makefile.am
        census.C
        feeder.C
        hr.C,h
        hr_info.C,h (new)

svn path=/trunk/boinc/; revision=12978
This commit is contained in:
parent 7d302fb1ba
commit c912a6506b
@@ -6494,12 +6494,31 @@ David 20 June 2007
        updater.C

Janus 21 June 2007
    - Added the scraping mechanism for Bittorrent support (not all Bittorrent clients
      actually support this, but those that do get an increased performance boost and
      don't have to contact the more expensive announce.php nearly as often as those
      that do not support it. Since it hasn't yet been decided which BT library we
      will be using I just added scraping anyways.
    - Added the scraping mechanism for Bittorrent support
      (not all Bittorrent clients actually support this,
      but those that do get an increased performance boost and
      don't have to contact the more expensive announce.php
      nearly as often as those that do not support it.
      Since it hasn't yet been decided which BT library we
      will be using I just added scraping anyways.

    html/bt/
        scrape.php (new)

David 21 June 2007
    - user web: fixed bug in private-message code
    - back end: moved HR_INFO to a separate file;
        did some debugging in HR code

    html/
        inc/
            forum_user.inc
            util.inc
        user/
            forum_pm.php
    sched/
        Makefile.am
        census.C
        feeder.C
        hr.C,h
        hr_info.C,h (new)
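For context on the scrape mechanism described in the note above: under the de-facto tracker convention, a client derives the scrape URL from the announce URL by replacing the trailing "announce" path component with "scrape" (so announce.php maps to the new html/bt/scrape.php), and the tracker answers with aggregate seeder/leecher/download counts instead of a full peer list, which is why it is much cheaper than a full announce. A minimal sketch of that URL derivation; the helper name is hypothetical and is not part of this commit:

```cpp
#include <optional>
#include <string>

// Derive a tracker's scrape URL from its announce URL, per the usual
// BitTorrent convention: the last path segment must begin with "announce",
// and that prefix is replaced with "scrape".  If it doesn't, the tracker
// gives no indication of scrape support and the caller must fall back to
// regular announces.
std::optional<std::string> scrape_url_from_announce(const std::string& announce) {
    const std::string prefix = "announce";
    std::string::size_type slash = announce.rfind('/');
    if (slash == std::string::npos) return std::nullopt;
    if (announce.compare(slash + 1, prefix.size(), prefix) != 0) {
        return std::nullopt;            // e.g. ".../tracker.cgi" -> no scrape
    }
    std::string scrape = announce;
    scrape.replace(slash + 1, prefix.size(), "scrape");
    return scrape;                      // ".../announce.php" -> ".../scrape.php"
}
```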
@@ -118,7 +118,7 @@ if ($send_email) {
    $volid = $_GET['volid'];
    $subject = stripslashes($_GET['subject']);
    $vol = vol_lookup($volid);
    if (!$vol) {
    if (!$vol || $vol->hide) {
        error_page("No such volunteer $volid");
    }
    $msg = stripslashes($_GET['message']);
@@ -141,6 +141,10 @@ function show_nsf() {
}

html_tag();
if (defined("CHARSET")) {
    header("Content-type: text/html; charset=".tr(CHARSET));
}

echo "
    <head>
    <link rel=\"shortcut icon\" href=\"iconsmall.ico\">
@@ -34,8 +34,13 @@ class User {
        } else {
            $this->dbObj = $this->dbhandler->getUser($id);
        }
        if ($optional_prefobj) {$this->fprefObj = $optional_prefobj; $this->prefs_loaded=true;};
        if (!$this->dbObj) error_page("User with id $id created but nothing returned from DB layer");
        if ($optional_prefobj) {
            $this->fprefObj = $optional_prefobj;
            $this->prefs_loaded=true;
        };
        if (!$this->dbObj) {
            error_page("User with id $id created but nothing returned from DB layer");
        }
    }

    /**
@@ -101,11 +101,11 @@ function show_login($user) {
    }
}

/**
 * Page_head() is overridable so that projects that want to integrate BOINC with an existing web framework
 * can more easily do so. To take advantage of this simply define the function page_head() somewhere
 * in the project include file.
 */
// Page_head() is overridable so that projects that want to integrate BOINC
// with an existing web framework can more easily do so.
// To take advantage of this simply define the function page_head()
// somewhere in the project include file.
//
if (!function_exists("page_head")){
    function page_head($title, $java_onload=null, $title_plain=null, $prefix="") {
        $styleSheet = URL_BASE . STYLESHEET;
@@ -73,7 +73,7 @@ if ($action == "inbox") {
    }

} elseif ($action == "new") {
    check_banished(new User($logged_in_user));
    check_banished(new User($logged_in_user->id));
    pm_create_new();
} elseif ($action == "delete") {
    $id = get_int("id", true);

@@ -106,7 +106,7 @@ if ($action == "inbox") {
        }
    }
} elseif ($action == "send") {
    check_banished(new User($logged_in_user));
    check_banished(new User($logged_in_user->id));
    check_tokens($logged_in_user->authenticator);

    $to = stripslashes(post_str("to", true));
@@ -66,6 +66,7 @@ cgi_SOURCES = \
    edf_sim.C \
    handle_request.C \
    hr.C \
    hr_info.C \
    main.C \
    sched_array.C \
    sched_hr.C \

@@ -78,7 +79,8 @@ cgi_SOURCES = \

census_SOURCES = \
    census.C \
    hr.C
    hr.C \
    hr_info.C

## install header-files with prefix-subdir BOINC/ to avoid name-conflicts
includedir = ${prefix}/include/BOINC/

@@ -96,6 +98,8 @@ cgi_DEPENDENCIES = $(LIB_SCHED)

feeder_SOURCES = \
    feeder.C \
    hr.C \
    hr_info.C \
    ../lib/synch.C

feeder_DEPENDENCIES = $(LIB_SCHED)

@@ -167,6 +171,8 @@ delete_file_LDADD = $(LDADD) $(RSA_LIBS)

fcgi_SOURCES = \
    handle_request.C \
    hr.C \
    hr_info.C \
    main.C \
    sched_send.C \
    sched_resend.C \
@@ -29,7 +29,7 @@
#include "sched_config.h"
#include "sched_util.h"
#include "sched_msgs.h"
#include "hr.h"
#include "hr_info.h"

int main() {
    HR_INFO hri;

@@ -52,6 +52,7 @@ int main() {
        exit(1);
    }
    boinc_db.set_isolation_level(READ_UNCOMMITTED);
    hri.init();
    hri.scan_db();
    hri.write_file("foobar");
    hri.write_file();
}
sched/feeder.C
@@ -17,9 +17,10 @@
// or write to the Free Software Foundation, Inc.,
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

// -------------------------------
// Feeder: create a shared memory segment containing DB info,
// including an array of work items (results/workunits to send).
//
// feeder
// Usage: feeder [ options ]
//  [ -d x ]             debug level x
//  [ -random_order ]    order by "random" field of result
//  [ -priority_order ]  order by decreasing "priority" field of result
@@ -32,9 +33,6 @@
//  that have been there for longer then x minutes
//  but haven't been assigned
//
// Creates a shared memory segment containing DB info,
// including an array of work items (results/workunits to send).
//
// The feeder tries to keep the work array filled.
// It maintains a DB enumerator (DB_WORK_ITEM).
// scan_work_array() scans the work array.
@@ -62,6 +60,20 @@
// app_count[] is the number of slots per app
// (approximately proportional to its weight)

// Homogeneous redundancy (HR):
// If HR is used, jobs can either be "uncommitted"
// (can send to any HR class)
// or "committed" (can send only to one HR class).
// The feeder tries to maintain a ratio of committed to uncommitted
// (generally 50/50) and, of committed jobs, ratios between HR classes
// (proportional to the total RAC of hosts in that class).
// This is to maximize the likelihood of having work for an average host.
//
// If you use different HR types between apps, you must use -allapps.
// Otherwise we wouldn't know how many slots to reserve for each HR type.
//
// It's OK to use HR for some apps and not others.

// Trigger files:
// The feeder program periodically checks for two trigger files:
//
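The comment block above states the target slot mix, but the code that computes it lives in HR_INFO::allocate() in the new hr_info.C, which is not shown on this page (hr_init() further down calls hr_info.allocate(ssp->max_wu_results), and get_job_from_db() then consults hr_info.accept()). The following is only a sketch of the allocation idea as described here -- half of the slots for uncommitted jobs, the committed half split across a type's HR classes in proportion to their total RAC -- with hypothetical names; the real implementation may differ:

```cpp
#include <cstddef>
#include <vector>

// Sketch of proportional slot allocation for one HR type.  Not the actual
// HR_INFO::allocate(); names and the exact rounding policy are assumptions.
std::vector<int> allocate_hr_slots(
    int total_slots,                          // e.g. ssp->max_wu_results
    const std::vector<double>& rac_per_class  // total RAC of hosts per HR class
) {
    std::vector<int> slots(rac_per_class.size(), 0);
    int committed = total_slots / 2;          // keep roughly 50/50 committed/uncommitted
    double total_rac = 0;
    for (double r : rac_per_class) total_rac += r;
    if (total_rac <= 0) return slots;         // no credit data yet: commit nothing
    for (std::size_t c = 0; c < rac_per_class.size(); c++) {
        slots[c] = static_cast<int>(committed * rac_per_class[c] / total_rac);
    }
    return slots;
}
```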
@@ -98,6 +110,7 @@ using std::vector;
#include "sched_shmem.h"
#include "sched_util.h"
#include "sched_msgs.h"
#include "hr_info.h"

#define DEFAULT_SLEEP_INTERVAL 5
@@ -125,6 +138,9 @@ int *app_indices;
int napps;
    // if -allapps, the number of apps
    // otherwise one
HR_INFO hr_info;
bool using_hr;
    // true iff any app is using HR

void cleanup_shmem() {
    ssp->ready = false;
@@ -153,6 +169,38 @@ int check_reread_trigger() {
    return 0;
}

// Count the # of slots used by HR classes.
// This is done at the start of each array scan,
// and doesn't reflect slots that have been emptied out by the scheduler
//
void hr_count_slots() {
    int i, j;

    for (i=1; i<HR_NTYPES; i++) {
        if (!hr_info.type_being_used[i]) continue;
        for (j=0; j<hr_nclasses[i]; j++) {
            hr_info.cur_slots[i][j] = 0;
        }
    }
    for (i=0; i<ssp->max_wu_results; i++) {
        int app_index = app_indices[i];
        int hrt = ssp->apps[app_index].homogeneous_redundancy;
        if (!hrt) continue;

        WU_RESULT& wu_result = ssp->wu_results[i];
        if (wu_result.state == WR_STATE_PRESENT) {
            int hrc = wu_result.workunit.hr_class;
            if (hrc < 0 || hrc >= hr_nclasses[hrt]) {
                log_messages.printf(SCHED_MSG_LOG::MSG_CRITICAL,
                    "HR class %d is out of range\n", hrc
                );
                continue;
            }
            hr_info.cur_slots[hrt][hrc]++;
        }
    }
}

// Enumerate jobs from DB until find one that is not already in the work array.
// If find one, return true.
// If reach end of enum for second time on this array scan, return false
@@ -209,9 +257,7 @@ static bool get_job_from_db(
            exit(1);
        }

        // Check for collision
        // (i.e. this result already is in the array)
        // If collision, then advance to the next workitem
        // Check for collision (i.e. this result already is in the array)
        //
        collision = false;
        for (j=0; j<ssp->max_wu_results; j++) {
@@ -235,14 +281,30 @@ static bool get_job_from_db(
                    break;
                }
            }
            if (!collision) {
                return true;
            if (collision) {
                continue;
            }

            // if using HR, check whether we've exceeded quota for this class
            //
            int hrt = ssp->apps[app_index].homogeneous_redundancy;
            if (hrt) {
                if (!hr_info.accept(hrt, wi.wu.hr_class)) {
                    log_messages.printf(
                        SCHED_MSG_LOG::MSG_DEBUG,
                        "rejecting [RESULT#%d] because HR class %d/%d over quota\n",
                        wi.res_id, hrt, wi.wu.hr_class
                    );
                    continue;
                }
            }
            return true;
        }
    }
    return false; // never reached
}

// This function decides the interleaving used for -allapps.
// Inputs:
//   n (number of weights)
//   k (length of vector)
@@ -290,6 +352,10 @@ static bool scan_work_array(vector<DB_WORK_ITEM> &work_items) {
        }
    }

    if (using_hr) {
        hr_count_slots();
    }

    for (i=0; i<ssp->max_wu_results; i++) {
        app_index = app_indices[i];
        if (enum_phase[app_index] == ENUM_OVER) continue;
@@ -405,6 +471,80 @@ void feeder_loop() {
    }
}

// see if we're using HR, and if so initialize the necessary data structures
//
void hr_init() {
    int i, retval;
    bool apps_differ = false;
    bool some_app_uses_hr = false;
    int hrt, hr_type0 = ssp->apps[0].homogeneous_redundancy;

    using_hr = false;

    for (i=0; i<ssp->napps; i++) {
        hrt = ssp->apps[i].homogeneous_redundancy;
        if (hrt <0 || hrt >= HR_NTYPES) {
            log_messages.printf(SCHED_MSG_LOG::MSG_CRITICAL,
                "HR type %d out of range for app %d\n", hrt, i
            );
            exit(1);
        }
        if (hrt) some_app_uses_hr = true;
        if (hrt != hr_type0) apps_differ = true;
    }
    if (config.homogeneous_redundancy) {
        hrt = config.homogeneous_redundancy;
        if (hrt < 0 || hrt >= HR_NTYPES) {
            log_messages.printf(SCHED_MSG_LOG::MSG_CRITICAL,
                "Main HR type %d out of range\n", hrt
            );
            exit(1);
        }
        if (some_app_uses_hr) {
            log_messages.printf(SCHED_MSG_LOG::MSG_CRITICAL,
                "You can specify HR at global or app level, but not both\n"
            );
            exit(1);
        }
        for (i=0; i<ssp->napps; i++) {
            ssp->apps[i].homogeneous_redundancy = config.homogeneous_redundancy;
            ssp->apps[i].weight = 1;
        }
    } else {
        if (some_app_uses_hr) {
            if (apps_differ && !all_apps) {
                log_messages.printf(SCHED_MSG_LOG::MSG_CRITICAL,
                    "You must use -allapps if apps have different HR\n"
                );
                exit(1);
            }
        } else {
            return;   // HR not being used
        }
    }
    using_hr = true;
    hr_info.init();
    retval = hr_info.read_file();
    if (retval) {
        log_messages.printf(SCHED_MSG_LOG::MSG_CRITICAL,
            "Can't read HR info file: %d\n", retval
        );
        exit(1);
    }

    // find the weight for each HR type
    //
    for (i=0; i<ssp->napps; i++) {
        int hrt = ssp->apps[i].homogeneous_redundancy;
        hr_info.type_weights[hrt] += ssp->apps[i].weight;
        hr_info.type_being_used[hrt] = true;
    }

    // compute the slot allocations for HR classes
    //
    hr_info.allocate(ssp->max_wu_results);
}

int main(int argc, char** argv) {
    int i, retval;
    void* p;
@@ -535,6 +675,7 @@ int main(int argc, char** argv) {
        napps = 1;
    }

    hr_init();

    feeder_loop();
}
sched/hr.C
@@ -159,67 +159,5 @@ bool hr_unknown_platform_type(HOST& host, int hr_type) {
    return false;
}

void HR_INFO::write_file(const char* filename) {
    int i, j;

    FILE* f = fopen(filename, "w");
    for (i=1; i<HR_NTYPES; i++) {
        fprintf(f, "--------- %s ----------\n", hr_names[i]);
        for (j=0; j<hr_nclasses[i]; j++) {
            fprintf(f, "%d %f\n", j, rac_per_class[i][j]);
        }
    }
    fclose(f);
}

void HR_INFO::read_file(const char* filename) {
    char buf[256];
    FILE* f = fopen(filename, "r");
    int i, j, jj;
    double x;

    for (i=1; i<HR_NTYPES; i++) {
        fgets(buf, sizeof(buf), f);
        for (j=0; j<hr_nclasses[i]; j++) {
            int n = fscanf(f, "%d %lf", &jj, &x);
            if (n!=2 || j!=jj) {
                fprintf(stderr, "huh?? %d != %d\n", j, jj);
                exit(1);
            }
            rac_per_class[i][j] = x;
        }
    }
    fclose(f);
}

void HR_INFO::scan_db() {
    DB_HOST host;
    int retval;
    int i;

    for (i=1; i<HR_NTYPES; i++) {
        rac_per_class[i] = (double*) calloc(hr_nclasses[i], sizeof(double));
    }
    while (1) {
        retval = host.enumerate("where expavg_credit>1");
        if (retval) break;
        printf("host %d: %s | %s | %s\n", host.id, host.os_name, host.p_vendor, host.p_model);
        for (i=1; i<HR_NTYPES; i++) {
            if (hr_unknown_platform_type(host, i)) {
                printf("type %d: unknown\n", i);
                continue;
            }
            int hrc = hr_class(host, i);
            printf("type %d: class %d\n", i, hrc);
            if (!hrc) continue;
            rac_per_class[i][hrc] += host.expavg_credit;
        }
    }
    if (retval != ERR_DB_NOT_FOUND) {
        fprintf(stderr, "host enum: %d", retval);
        exit(1);
    }
}

const char* hr_names[HR_NTYPES] = {"", "fine", "coarse"};
int hr_nclasses[HR_NTYPES] = {0, 768, 768};
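As a reading aid: HR_INFO::write_file() above (relocated into the new hr_info.C by this commit) writes one banner line per HR type followed by one "class rac" pair per class, and read_file() expects exactly that layout back. With hr_names = {"", "fine", "coarse"} and 768 classes per type, the file looks roughly like this (the numbers here are illustrative, not real project data):

```
--------- fine ----------
0 0.000000
1 152340.125000
2 87.500000
...
767 0.000000
--------- coarse ----------
0 310022.750000
1 0.000000
...
```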
sched/hr.h
@@ -20,15 +20,9 @@
#include "boinc_db.h"

#define HR_NTYPES 3

struct HR_INFO {
    double *rac_per_class[HR_NTYPES];
    void write_file(const char*);
    void read_file(const char*);
    void scan_db();
};
    // actually ntypes+1 (0 is reserved for "no HR")

extern int hr_class(HOST&, int hr_type);
extern bool hr_unknown_platform_type(HOST&, int hr_type);
extern const char* hr_names[];
extern int hr_nclasses[];
extern const char* hr_names[HR_NTYPES];
extern int hr_nclasses[HR_NTYPES];
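The new sched/hr_info.C,h files named in the checkin notes are not rendered on this page. Inferring only from the call sites visible above (census.C: init(), scan_db(), write_file(); feeder.C: read_file(), allocate(), accept(), plus the rac_per_class, type_weights, type_being_used and cur_slots members), the relocated declaration has to provide roughly the following. This is a hypothetical reconstruction for orientation, not the actual header:

```cpp
// Hypothetical reconstruction of the HR_INFO declaration that this commit
// moves out of hr.h; every name below is inferred from its uses in census.C
// and feeder.C, and the real hr_info.h may well differ.
#include "hr.h"     // HR_NTYPES, hr_nclasses[], hr_class(), hr_names[]

struct HR_INFO {
    double* rac_per_class[HR_NTYPES];   // per-class total RAC, filled by scan_db()
    double type_weights[HR_NTYPES];     // summed app weights per HR type (feeder)
    bool type_being_used[HR_NTYPES];    // any app using this HR type?
    int* cur_slots[HR_NTYPES];          // slots currently occupied per class (feeder)

    void init();
    void scan_db();                     // tally RAC per class (census)
    void write_file();                  // the old hr.C version took a filename
    int read_file();                    // returns nonzero on error (feeder checks it)
    void allocate(int total_slots);     // compute per-class slot quotas
    bool accept(int hr_type, int hr_class); // is this class still under quota?
};
```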