mirror of https://github.com/BOINC/boinc.git
- server: major improvements to locality scheduling from Einstein@home.
  Triggering the work generator is now done via the DB instead of flat files.
  Since only E@h uses locality scheduling, I kept the DB changes in a
  separate file (db/schema_locality.sql). There's a new field in the
  workunit table, and that's a required update (in db_update.php).
- manager: compile fix

svn path=/trunk/boinc/; revision=20807
This commit is contained in: parent de2fac548c, commit 295d4b54ea
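With this change, a project's work generator no longer watches the locality_scheduling/need_work flag-file directory; it can poll the new sched_trigger table through the DB_SCHED_TRIGGER class added below. The following is a minimal sketch of such a polling step, assuming boinc_db has already been opened by the usual daemon startup code; the function name and flag handling are illustrative, not E@h's actual generator logic.

    #include "boinc_db.h"

    // Hypothetical work-generator fragment: check the trigger row for one
    // fileset and create work when the scheduler has asked for it.
    static int poll_trigger_and_generate(const char* fileset_name) {
        DB_SCHED_TRIGGER trigger;

        // fetch the trigger row joined to the fileset with this name
        int retval = trigger.select_unique_by_fileset_name(fileset_name);
        if (retval) return retval;

        if (trigger.need_work && !trigger.no_work_available) {
            // ... create workunits for this fileset here ...

            // announce that work is ready, then clear the request flag
            trigger.update_single_state(DB_SCHED_TRIGGER::state_work_available, true);
            trigger.update_single_state(DB_SCHED_TRIGGER::state_need_work, false);
        }
        return 0;
    }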
checkin_notes

@@ -1713,3 +1713,28 @@ David 5 Mar 2010
        client_types.h
        app.cpp,h
        cpu_sched.cpp

David 5 Mar 2010
    - server: major improvements to locality scheduling from Einstein@home.
        Triggering the work generator is now done via the DB
        instead of flat files.
        Since only E@h uses locality scheduling,
        I kept the DB changes in a separate file (db/schema_locality.sql).
        There's a new field in the workunit table,
        and that's a required update (in db_update.php)
    - manager: compile fix

    clientgui/
        DlgEventLog.h
    db/
        boinc_db.cpp,h
        schema.sql
        schema_locality.sql
    sched/
        sched_locality.cpp
        Makefile.am
    html/ops
        db_update.php
    lib/
        Makefile.am
clientgui/DlgEventLog.h

@@ -109,7 +109,7 @@ public:
    void OnMessagesCopySelected( wxCommandEvent& event );

    /// wxEVT_COMMAND_BUTTON_CLICKED event handler for ID_TASK_MESSAGES_FILTERBYPROJECT
    void CDlgEventLog::OnMessagesFilter( wxCommandEvent& event );
    void OnMessagesFilter( wxCommandEvent& event );

    /// wxEVT_COMMAND_BUTTON_CLICKED event handler for ID_SIMPLE_HELP
    void OnButtonHelp( wxCommandEvent& event );
db/boinc_db.cpp
@@ -70,6 +70,18 @@ void VALIDATOR_ITEM::clear() {memset(this, 0, sizeof(*this));}
void SCHED_RESULT_ITEM::clear() {memset(this, 0, sizeof(*this));}
void CREDIT_MULTIPLIER::clear() {memset(this, 0, sizeof(*this));}
void STATE_COUNTS::clear() {memset(this, 0, sizeof(*this));}
void FILE_ITEM::clear() {memset(this, 0, sizeof(*this));}
void FILESET_ITEM::clear() {memset(this, 0, sizeof(*this));}
void FILESET_FILE_ITEM::clear() {memset(this, 0, sizeof(*this));}
void SCHED_TRIGGER_ITEM::clear() {
    id = 0;
    fileset_id = 0;
    need_work = false;
    work_available = false;
    no_work_available = false;
    working_set_removal = false;
}
void FILESET_SCHED_TRIGGER_ITEM::clear() {memset(this, 0, sizeof(*this));}

DB_PLATFORM::DB_PLATFORM(DB_CONN* dc) :
    DB_BASE("platform", dc?dc:&boinc_db){}
@@ -112,6 +124,25 @@ DB_IN_PROGRESS_RESULT::DB_IN_PROGRESS_RESULT(DB_CONN* dc) :
    DB_BASE_SPECIAL(dc?dc:&boinc_db){}
DB_SCHED_RESULT_ITEM_SET::DB_SCHED_RESULT_ITEM_SET(DB_CONN* dc) :
    DB_BASE_SPECIAL(dc?dc:&boinc_db){}
DB_FILE::DB_FILE(DB_CONN* dc) :
    DB_BASE("file", dc?dc:&boinc_db){}
DB_FILESET::DB_FILESET(DB_CONN* dc) :
    DB_BASE("fileset", dc?dc:&boinc_db){}
DB_FILESET_FILE::DB_FILESET_FILE(DB_CONN* dc) :
    DB_BASE("fileset_file", dc?dc:&boinc_db){}
DB_SCHED_TRIGGER::DB_SCHED_TRIGGER(DB_CONN* dc) :
    DB_BASE("sched_trigger", dc?dc:&boinc_db) {
    id = 0;
    fileset_id = 0;
    need_work = false;
    work_available = false;
    no_work_available = false;
    working_set_removal = false;
}
DB_FILESET_SCHED_TRIGGER_ITEM::DB_FILESET_SCHED_TRIGGER_ITEM(DB_CONN* dc) :
    DB_BASE_SPECIAL(dc?dc:&boinc_db){}
DB_FILESET_SCHED_TRIGGER_ITEM_SET::DB_FILESET_SCHED_TRIGGER_ITEM_SET(DB_CONN* dc) :
    DB_BASE_SPECIAL(dc?dc:&boinc_db){}

int DB_PLATFORM::get_id() {return id;}
int DB_APP::get_id() {return id;}
@@ -126,6 +157,9 @@ int DB_MSG_TO_HOST::get_id() {return id;}
int DB_ASSIGNMENT::get_id() {return id;}
int DB_CREDIT_MULTIPLIER::get_id() {return id;}
int DB_STATE_COUNTS::get_id() {return appid;}
int DB_FILE::get_id() {return id;}
int DB_FILESET::get_id() {return id;}
int DB_SCHED_TRIGGER::get_id() {return id;}

void DB_PLATFORM::db_print(char* buf){
    sprintf(buf,
@@ -661,7 +695,8 @@ void DB_WORKUNIT::db_print(char* buf){
        "max_total_results=%d, max_success_results=%d, "
        "result_template_file='%s', "
        "priority=%d, "
        "rsc_bandwidth_bound=%.15e ",
        "rsc_bandwidth_bound=%.15e, "
        "fileset_id=%d ",
        create_time, appid,
        name, xml_doc, batch,
        rsc_fpops_est, rsc_fpops_bound, rsc_memory_bound, rsc_disk_bound,
@@ -677,7 +712,8 @@ void DB_WORKUNIT::db_print(char* buf){
        max_success_results,
        result_template_file,
        priority,
        rsc_bandwidth_bound
        rsc_bandwidth_bound,
        fileset_id
    );
}

@@ -713,6 +749,7 @@ void DB_WORKUNIT::db_parse(MYSQL_ROW &r) {
    priority = atoi(r[i++]);
    strcpy2(mod_time, r[i++]);
    rsc_bandwidth_bound = atof(r[i++]);
    fileset_id = atoi(r[i++]);
}

void DB_CREDITED_JOB::db_print(char* buf){
@@ -1457,6 +1494,7 @@ void WORK_ITEM::parse(MYSQL_ROW& r) {
    wu.priority = atoi(r[i++]);
    strcpy2(wu.mod_time, r[i++]);
    wu.rsc_bandwidth_bound = atof(r[i++]);
    wu.fileset_id = atoi(r[i++]);
}

int DB_WORK_ITEM::enumerate(
@@ -1773,4 +1811,287 @@ int DB_SCHED_RESULT_ITEM_SET::update_workunits() {
    }
}

void DB_FILE::db_print(char* buf){
    snprintf(buf, MAX_QUERY_LEN,
        "name='%s', md5sum=%s, size=%f",
        name, md5sum, size
    );
}

void DB_FILE::db_parse(MYSQL_ROW &r) {
    int i=0;
    clear();
    id = atoi(r[i++]);
    strcpy2(name, r[i++]);
    strcpy2(md5sum, r[i++]);
    size = atof(r[i++]);
}

void DB_FILESET::db_print(char* buf){
    snprintf(buf, MAX_QUERY_LEN, "name='%s'", name);
}

void DB_FILESET::db_parse(MYSQL_ROW &r) {
    int i=0;
    clear();
    id = atoi(r[i++]);
    strcpy2(name, r[i++]);
}

int DB_FILESET::select_by_name(const char* name) {
    char where_clause[MAX_QUERY_LEN] = {0};

    // construct where clause and select single record
    snprintf(where_clause, MAX_QUERY_LEN, "WHERE name = '%s'", name);
    return lookup(where_clause);
}

void DB_FILESET_FILE::db_print(char* buf){
    snprintf(buf, MAX_QUERY_LEN,
        "fileset_id=%d, file_id=%d",
        fileset_id, file_id
    );
}

void DB_FILESET_FILE::db_parse(MYSQL_ROW &r) {
    int i=0;
    clear();
    fileset_id = atoi(r[i++]);
    file_id = atoi(r[i++]);
}

void DB_SCHED_TRIGGER::db_print(char* buf){
    snprintf(buf, MAX_QUERY_LEN,
        "fileset_id=%d, need_work=%d, work_available=%d, no_work_available=%d, working_set_removal=%d",
        fileset_id, need_work?1:0, work_available?1:0, no_work_available?1:0, working_set_removal?1:0
    );
}

void DB_SCHED_TRIGGER::db_parse(MYSQL_ROW &r) {
    int i=0;
    clear();
    id = atoi(r[i++]);
    fileset_id = atoi(r[i++]);
    need_work = atoi(r[i++]);
    work_available = atoi(r[i++]);
    no_work_available = atoi(r[i++]);
    working_set_removal = atoi(r[i++]);
}

int DB_SCHED_TRIGGER::select_unique_by_fileset_name(const char* fileset_name) {
    char query[MAX_QUERY_LEN];
    int retval;
    int count = 0;
    MYSQL_RES* recordset;
    MYSQL_ROW row;

    if (!cursor.active) {
        // prepare statement
        snprintf(query, MAX_QUERY_LEN,
            "SELECT"
            " t.id,"
            " t.fileset_id,"
            " t.need_work,"
            " t.work_available,"
            " t.no_work_available,"
            " t.working_set_removal "
            "FROM"
            " fileset fs INNER JOIN sched_trigger t ON fs.id = t.fileset_id "
            "WHERE"
            " fs.name = '%s'",
            fileset_name
        );

        retval = db->do_query(query);

        if (retval) return mysql_errno(db->mysql);

        recordset = mysql_store_result(db->mysql);
        if (!recordset) return mysql_errno(db->mysql);
    }

    // determine number of records, fetch first
    count = mysql_num_rows(recordset);
    row = mysql_fetch_row(recordset);

    if (!row || count != 1) {
        // something bad happened
        if (!row) {
            // no row returned, due to an error?
            retval = mysql_errno(db->mysql);
            mysql_free_result(recordset);

            // yes, probably lost DB connection
            if (retval) return ERR_DB_CONN_LOST;

            // no, just no record available
            return ERR_DB_NOT_FOUND;
        }
        else {
            // we got more records than expected
            mysql_free_result(recordset);
            return ERR_DB_NOT_UNIQUE;
        }
    } else {
        // all fine, parse single record
        db_parse(row);
        mysql_free_result(recordset);
    }
    return 0;
}

int DB_SCHED_TRIGGER::update_single_state(const DB_SCHED_TRIGGER::STATE state, const bool value) {
    char column_clause[MAX_QUERY_LEN] = {0};
    int retval = 0;

    switch(state) {
    case DB_SCHED_TRIGGER::state_need_work:
        snprintf(column_clause, MAX_QUERY_LEN, "need_work = %d", value?1:0);
        need_work = value;
        break;
    case DB_SCHED_TRIGGER::state_work_available:
        snprintf(column_clause, MAX_QUERY_LEN, "work_available = %d", value?1:0);
        work_available = value;
        break;
    case DB_SCHED_TRIGGER::state_no_work_available:
        snprintf(column_clause, MAX_QUERY_LEN, "no_work_available = %d", value?1:0);
        no_work_available = value;
        break;
    case DB_SCHED_TRIGGER::state_working_set_removal:
        snprintf(column_clause, MAX_QUERY_LEN, "working_set_removal = %d", value?1:0);
        working_set_removal = value;
        break;
    default:
        // unknown state
        return -1;
    }

    // run actual update on current trigger (retrieved earlier)
    retval = update_field(column_clause, NULL);

    if (retval) return retval;
    return 0;
}

void DB_FILESET_SCHED_TRIGGER_ITEM::db_parse(MYSQL_ROW &r) {
    int i=0;
    clear();
    fileset.id = atoi(r[i++]);
    strcpy2(fileset.name, r[i++]);
    trigger.id = atoi(r[i++]);
    trigger.fileset_id = atoi(r[i++]);
    trigger.need_work = atoi(r[i++]);
    trigger.work_available = atoi(r[i++]);
    trigger.no_work_available = atoi(r[i++]);
    trigger.working_set_removal = atoi(r[i++]);
}

int DB_FILESET_SCHED_TRIGGER_ITEM_SET::select_by_name_state(
    const char* fileset_name = NULL,
    const bool use_regexp = false,
    const DB_SCHED_TRIGGER::STATE state = DB_SCHED_TRIGGER::none,
    const bool state_value = true
) {
    char where_clause[MAX_QUERY_LEN] = {0};
    char query[MAX_QUERY_LEN] = {0};
    int retval = 0;
    int count = 0;
    MYSQL_RES* recordset;
    MYSQL_ROW row;
    DB_FILESET_SCHED_TRIGGER_ITEM fileset_trigger;

    // prepare requested compare mode
    const char* comparator = use_regexp ? "REGEXP" : "=";

    // prepare optional state filter
    char state_filter[MAX_QUERY_LEN] = {0};
    switch(state) {
    case DB_SCHED_TRIGGER::state_need_work:
        snprintf(state_filter, MAX_QUERY_LEN, "need_work = %d", state_value?1:0);
        break;
    case DB_SCHED_TRIGGER::state_work_available:
        snprintf(state_filter, MAX_QUERY_LEN, "work_available = %d", state_value?1:0);
        break;
    case DB_SCHED_TRIGGER::state_no_work_available:
        snprintf(state_filter, MAX_QUERY_LEN, "no_work_available = %d", state_value?1:0);
        break;
    case DB_SCHED_TRIGGER::state_working_set_removal:
        snprintf(state_filter, MAX_QUERY_LEN, "working_set_removal = %d", state_value?1:0);
        break;
    default:
        // none or unknown state (keep empty filter)
        break;
    }

    // prepare WHERE clause
    if(fileset_name && !state) {
        snprintf(where_clause, MAX_QUERY_LEN, "WHERE fs.name %s '%s'", comparator, fileset_name);
    } else if(!fileset_name && state) {
        snprintf(where_clause, MAX_QUERY_LEN, "WHERE %s", state_filter);
    } else if(fileset_name && state) {
        snprintf(where_clause, MAX_QUERY_LEN, "WHERE fs.name %s '%s' AND %s", comparator, fileset_name, state_filter);
    }

    // prepare final statement
    snprintf(query, MAX_QUERY_LEN,
        "SELECT"
        " fs.id,"
        " fs.name,"
        " t.id,"
        " t.fileset_id,"
        " t.need_work,"
        " t.work_available,"
        " t.no_work_available,"
        " t.working_set_removal "
        "FROM"
        " fileset fs INNER JOIN sched_trigger t ON fs.id = t.fileset_id "
        "%s",
        where_clause
    );

    retval = db->do_query(query);
    if (retval) return retval;

    recordset = mysql_store_result(db->mysql);
    if (!recordset) return mysql_errno(db->mysql);

    // check if we got at least one record
    count = mysql_num_rows(recordset);
    if(count == 0) {
        mysql_free_result(recordset);
        return ERR_DB_NOT_FOUND;
    }

    // all fine, iterate over recordset
    do {
        row = mysql_fetch_row(recordset);
        if (!row) {
            // clean up
            mysql_free_result(recordset);

            // no row returned, due to an error?
            retval = mysql_errno(db->mysql);
            // yes, probably lost DB connection
            if (retval) return ERR_DB_CONN_LOST;
        } else {
            // parse record, add to vector
            fileset_trigger.db_parse(row);
            items.push_back(fileset_trigger);
        }
    } while (row);

    return 0;
}

int DB_FILESET_SCHED_TRIGGER_ITEM_SET::contains_trigger(const char* fileset_name) {
    // iterate over item vector
    for(int i = 0; i < items.size(); ++i) {
        if(strcmp(items[i].fileset.name, fileset_name) == 0) {
            // return 1-indexed position for boolean tests
            return i+1;
        }
    }
    return 0;
}

const char *BOINC_RCSID_ac374386c8 = "$Id$";
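The new wrapper classes can also be used on the project back end to register a fileset and its trigger row before any workunits reference them. A rough sketch, assuming the generic DB_BASE::insert() and DB_CONN::insert_id() helpers work here as they do for the other BOINC tables; the helper name and error handling are illustrative.

    #include "boinc_db.h"
    #include <cstring>

    // Hypothetical helper: create a fileset row plus its (initially idle)
    // scheduler trigger; returns the new fileset ID through fileset_id.
    static int register_fileset(const char* name, int& fileset_id) {
        DB_FILESET fileset;
        DB_SCHED_TRIGGER trigger;
        int retval;

        fileset.clear();
        strcpy(fileset.name, name);
        retval = fileset.insert();          // generic DB_BASE insert
        if (retval) return retval;
        fileset_id = boinc_db.insert_id();  // ID assigned by auto_increment

        trigger.clear();
        trigger.fileset_id = fileset_id;    // all four state flags start false
        return trigger.insert();
    }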
db/boinc_db.h
@@ -369,8 +369,6 @@ struct WORKUNIT {
        // used for 2 purposes:
        // 1) for scheduling (don't send this WU to a host w/ insuff. disk)
        // 2) abort task if it uses more than this disk
    double rsc_bandwidth_bound;
        // send only to hosts with at least this much download bandwidth
    bool need_validate;         // this WU has at least 1 result in
                                // validate state = NEED_CHECK
    int canonical_resultid;     // ID of canonical result, or zero
@@ -399,6 +397,9 @@ struct WORKUNIT {
    char result_template_file[64];
    int priority;
    char mod_time[16];
    double rsc_bandwidth_bound;
        // send only to hosts with at least this much download bandwidth
    int fileset_id;

    // the following not used in the DB
    char app_name[256];
@@ -629,7 +630,6 @@ struct VALIDATOR_ITEM {
    void parse(MYSQL_ROW&);
};


class DB_PLATFORM : public DB_BASE, public PLATFORM {
public:
@@ -875,4 +875,125 @@ public:
    int update_workunits();
};

struct FILE_ITEM {
    int id;
    char name[254];
    char md5sum[34];
    double size;

    void clear();
};

class DB_FILE : public DB_BASE, public FILE_ITEM {
public:
    DB_FILE(DB_CONN* p=0);
    int get_id();
    void db_print(char*);
    void db_parse(MYSQL_ROW &row);
    void operator=(FILE_ITEM& f) {FILE_ITEM::operator=(f);}
};

struct FILESET_ITEM {
    int id;
    char name[254];

    void clear();
};

class DB_FILESET : public DB_BASE, public FILESET_ITEM {
public:
    DB_FILESET(DB_CONN* p=0);
    int get_id();
    void db_print(char*);
    void db_parse(MYSQL_ROW &row);
    void operator=(FILESET_ITEM& f) {FILESET_ITEM::operator=(f);}

    // retrieve fileset instance (populate object)
    int select_by_name(const char* name);
};

struct FILESET_FILE_ITEM {
    int fileset_id;
    int file_id;

    void clear();
};

class DB_FILESET_FILE : public DB_BASE, public FILESET_FILE_ITEM {
public:
    DB_FILESET_FILE(DB_CONN* p=0);
    void db_print(char*);
    void db_parse(MYSQL_ROW &row);
    void operator=(FILESET_FILE_ITEM& tf) {FILESET_FILE_ITEM::operator=(tf);}
};

struct SCHED_TRIGGER_ITEM {
    int id;
    int fileset_id;
    bool need_work;
    bool work_available;
    bool no_work_available;
    bool working_set_removal;

    void clear();
};

class DB_SCHED_TRIGGER : public DB_BASE, public SCHED_TRIGGER_ITEM {
public:
    DB_SCHED_TRIGGER(DB_CONN* p=0);
    int get_id();
    void db_print(char*);
    void db_parse(MYSQL_ROW &row);
    void operator=(SCHED_TRIGGER_ITEM& t) {SCHED_TRIGGER_ITEM::operator=(t);}

    typedef enum {
        none                        = 0,
        state_need_work             = 1,
        state_work_available        = 2,
        state_no_work_available     = 3,
        state_working_set_removal   = 4
    } STATE;

    // retrieve trigger instance (populate object)
    int select_unique_by_fileset_name(const char* fileset_name);
    // set single trigger state
    int update_single_state(const DB_SCHED_TRIGGER::STATE state, const bool value);
};

struct FILESET_SCHED_TRIGGER_ITEM {
    FILESET_ITEM fileset;
    SCHED_TRIGGER_ITEM trigger;

    void clear();
};

class DB_FILESET_SCHED_TRIGGER_ITEM : public DB_BASE_SPECIAL, public FILESET_SCHED_TRIGGER_ITEM {
public:
    DB_FILESET_SCHED_TRIGGER_ITEM(DB_CONN* p=0);
    void db_parse(MYSQL_ROW &row);
    void operator=(FILESET_SCHED_TRIGGER_ITEM& fst) {FILESET_SCHED_TRIGGER_ITEM::operator=(fst);}
};

class DB_FILESET_SCHED_TRIGGER_ITEM_SET : public DB_BASE_SPECIAL {
public:
    DB_FILESET_SCHED_TRIGGER_ITEM_SET(DB_CONN* p=0);

    // select available triggers based on name and/or state
    // -> name filter optional (set string, default NULL)
    // -> pattern search optional (set use_regexp to true, default false))
    // -> state filter optional (set state, default none)
    // -> state_value (default true)
    int select_by_name_state(
        const char* fileset_name,
        const bool use_regexp,
        const DB_SCHED_TRIGGER::STATE state,
        const bool state_value);

    // check if given trigger (fileset name) is part of set and return position (1-indexed)
    int contains_trigger(const char* fileset_name);

    // storage vector
    std::vector<DB_FILESET_SCHED_TRIGGER_ITEM> items;
};

#endif
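DB_FILESET_SCHED_TRIGGER_ITEM_SET is what the scheduler uses below to build its working set: a caller can filter by fileset name (optionally as a MySQL REGEXP) and/or by one of the four trigger flags. A small usage sketch; the name pattern and fileset names are purely illustrative.

    #include "boinc_db.h"
    #include <cstdio>

    // Hypothetical query: list every fileset whose name starts with "h1_"
    // and whose trigger currently reports work_available.
    static void list_available_filesets() {
        DB_FILESET_SCHED_TRIGGER_ITEM_SET triggers;

        int retval = triggers.select_by_name_state(
            "^h1_", true,                                 // name filter, used as REGEXP
            DB_SCHED_TRIGGER::state_work_available, true  // flag filter
        );
        if (retval) return;   // ERR_DB_NOT_FOUND if the set is empty

        for (unsigned int i = 0; i < triggers.items.size(); i++) {
            printf("%s\n", triggers.items[i].fileset.name);
        }

        // membership test: nonzero result is the 1-based position in the set
        if (triggers.contains_trigger("h1_0123.45_S5R4")) {
            // ... this fileset is in the current working set ...
        }
    }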
db/schema.sql

@@ -214,6 +214,7 @@ create table workunit (
    priority                integer     not null,
    mod_time                timestamp,
    rsc_bandwidth_bound     double      not null,
    fileset_id              integer     not null,
    primary key (id)
) engine=InnoDB;
db/schema_locality.sql (new file)

@@ -0,0 +1,43 @@
create table file (
    id                      integer         not null auto_increment,
    name                    varchar(254)    not null,
    md5sum                  varchar(32)     not null,
    size                    double          not null default 0,
    primary key (id)
) engine=InnoDB;

alter table file add unique(name);

create table fileset (
    id                      integer         not null auto_increment,
    name                    varchar(254)    not null,
    primary key (id)
) engine=InnoDB;

alter table fileset add unique(name);

create table fileset_file (
    fileset_id              integer         not null,
    file_id                 integer         not null,
    primary key (fileset_id, file_id),
    foreign key (fileset_id) references fileset(id) on delete cascade,
    foreign key (file_id) references file(id) on delete cascade
) engine=InnoDB;

create table sched_trigger (
    id                      integer         not null auto_increment,
    fileset_id              integer         not null,
    need_work               integer         not null default 0,
    work_available          integer         not null default 0,
    no_work_available       integer         not null default 0,
    working_set_removal     integer         not null default 0,
    primary key (id)
) engine=InnoDB;

alter table sched_trigger
    add constraint foreign key(fileset_id) references fileset(id),
    add unique(fileset_id),
    add index(need_work),
    add index(work_available),
    add index(no_work_available),
    add index(working_set_removal);
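The only change to the existing schema is the workunit.fileset_id column, which ties each workunit to the fileset whose trigger row governs it. A hedged sketch of how a generator might fill it in; real projects would normally go through the create_work() backend API rather than a bare insert, and the helper shown here is hypothetical.

    #include "boinc_db.h"

    // Hypothetical fragment: attach a new workunit to its fileset.
    static int make_wu_for_fileset(const char* fileset_name) {
        DB_FILESET fileset;
        DB_WORKUNIT wu;

        // look up the fileset row so we know its ID
        int retval = fileset.select_by_name(fileset_name);
        if (retval) return retval;

        wu.clear();
        wu.fileset_id = fileset.id;   // new column added by this commit
        // ... fill in name, appid, templates and the other fields as usual ...
        return wu.insert();           // or hand wu to create_work()
    }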
html/ops/db_update.php

@@ -653,6 +653,10 @@ function update_9_3_2009() {
    ");
}

function update_3_5_2010() {
    do_query("alter table workunit add fileset_id integer not null");
}

// Updates are done automatically if you use "upgrade".
//
// If you need to do updates manually,

@@ -666,6 +670,7 @@ function update_9_3_2009() {
$db_updates = array (
    array(18490, "update_6_16_2009"),
    array(19001, "update_9_3_2009"),
    array(20807, "update_3_5_2010"),
);

?>
lib/Makefile.am

@@ -90,6 +90,7 @@ endif
if INSTALL_HEADERS
pkginclude_HEADERS = \
    app_ipc.h \
    average.h \
    base64.h \
    boinc_fcgi.h \
    boinc_win.h \
sched/Makefile.am

@@ -29,6 +29,7 @@ libsched_la_LIBADD= $(SSL_LIBS)
## install only headers that are meant for exporting the API !!
if INSTALL_HEADERS
pkginclude_HEADERS = \
    credit.h \
    sched_config.h \
    sched_msgs.h \
    sched_util.h \
sched/sched_locality.cpp

@@ -307,101 +307,138 @@ static int possibly_send_result(DB_RESULT& result) {
    return add_result_to_reply(result, wu, bavp, true);
}

// returns true if the work generator can not make more work for this
// file, false if it can.
// Retrieves and returns a trigger instance identified by the given
// fileset name.
//
static bool work_generation_over(char *filename) {
    return boinc_file_exists(config.project_path("locality_scheduling/no_work_available/%s", filename));
static bool retrieve_single_trigger_by_fileset_name(char *fileset_name, DB_SCHED_TRIGGER& trigger) {
    int retval = 0;

    // retrieve trigger
    retval = trigger.select_unique_by_fileset_name(fileset_name);
    if(!retval) {
        if (config.debug_locality) {
            log_messages.printf(MSG_DEBUG,
                "[locality] trigger %s state after retrieval: nw=%i wa=%i nwa=%i wsr=%i\n",
                fileset_name,
                trigger.need_work,
                trigger.work_available,
                trigger.no_work_available,
                trigger.working_set_removal
            );
        }

        // successful retrieval
        return true;
    }
    else if(retval == ERR_DB_NOT_FOUND) {
        log_messages.printf(MSG_NORMAL,
            "[locality] trigger retrieval for filename %s returned empty set\n", fileset_name
        );
        return false;
    }
    else {
        log_messages.printf(MSG_CRITICAL,
            "[locality] trigger retrieval for filename %s failed with error %i\n", fileset_name, retval
        );
        return false;
    }
}

// Ask the WU generator to make more WUs for this file.
// Returns nonzero if can't make more work.
// Returns zero if it *might* have made more work
// (no way to be sure if it suceeded).
// (no way to be sure if it succeeded).
//
int make_more_work_for_file(char* filename) {
    const char *fullpath;
    int retval = 0;
    DB_SCHED_TRIGGER trigger;

    if (work_generation_over(filename)) {
        // since we found this file, it means that no work remains for this WU.
        // So give up trying to interact with the WU generator.

    if (!retrieve_single_trigger_by_fileset_name(filename, trigger)) {
        // trigger retrieval failed (message logged by previous method)
        return -1;
    }

    // Check if there's remaining work for this WU
    if (trigger.no_work_available) {
        // Give up trying to interact with the WU generator.
        if (config.debug_locality) {
            log_messages.printf(MSG_NORMAL,
                "[locality] work generator says no work remaining for file %s\n", filename
                "[locality] work generator says no work remaining for trigger %s\n", filename
            );
        }
        return -1;
    }

    // open and touch a file in the need_work/
    // directory as a way of indicating that we need work for this file.
    // If this operation fails, don't worry or tarry!
    //
    fullpath = config.project_path("locality_scheduling/need_work/%s", filename);
    if (boinc_touch_file(fullpath)) {
        log_messages.printf(MSG_CRITICAL, "unable to touch %s\n", fullpath);
    // // FIXME: should we reset these? The old code didn't do any consistency checks...
    // trigger.work_available = false;
    // trigger.no_work_available = false;
    // trigger.working_set_removal = false;

    // set trigger state to need_work as a way of indicating that we need work
    // for this fileset. If this operation fails, don't worry or tarry!
    retval = trigger.update_single_state(DB_SCHED_TRIGGER::state_need_work, true);
    if (retval) {
        log_messages.printf(MSG_CRITICAL, "unable to set need_work state for trigger %s (error: %d)\n", filename, retval);
        return -1;
    }

    if (config.debug_locality) {
        log_messages.printf(MSG_NORMAL,
            "[locality] touched %s: need work for file %s\n", fullpath, filename
        );
    }
    return 0;
}

// Get a randomly-chosen filename in the working set.
//
// We store a static list to prevent duplicate filename returns
// and to cut down on invocations of glob
// and to cut down on DB queries
//
//

std::vector<std::string> filenamelist;
int list_type = 0; // 0: none, 1: slowhost, 2: fasthost

static void build_working_set_namelist(bool slowhost) {
    glob_t globbuf;
    int retglob;
    int retval = 0;
    unsigned int i;
    const char *pattern = config.project_path("locality_scheduling/work_available/*");
    const char *pattern = ".*";
    bool use_pattern = false;
    const char *errtype = "unrecognized error";
    const char *hosttype = "fasthost";
    DB_FILESET_SCHED_TRIGGER_ITEM_SET filesets;

#ifdef EINSTEIN_AT_HOME
    if (slowhost) {
        hosttype = "slowhost";
        pattern = config.project_path("locality_scheduling/work_available/*_0[0-3]*");
        pattern = ".*_0[0-3].*";
        use_pattern = true;
    }
#endif

    retglob=glob(pattern, GLOB_ERR|GLOB_NOSORT|GLOB_NOCHECK, NULL, &globbuf);
    if(use_pattern) {
        retval = filesets.select_by_name_state(pattern, true, DB_SCHED_TRIGGER::state_work_available, true);
    }
    else {
        retval = filesets.select_by_name_state(NULL, false, DB_SCHED_TRIGGER::state_work_available, true);
    }

    if (retglob || !globbuf.gl_pathc) {
        errtype = "no directory or not readable";
    } else {
        if (globbuf.gl_pathc==1 && !strcmp(pattern, globbuf.gl_pathv[0])) {
            errtype = "empty directory";
        } else {
            for (i=0; i<globbuf.gl_pathc; i++) {
                filenamelist.push_back(globbuf.gl_pathv[i]);
            }
            if (config.debug_locality) {
                log_messages.printf(MSG_NORMAL,
                    "[locality] build_working_set_namelist(%s): pattern %s has %d matches\n",
                    hosttype, pattern, globbuf.gl_pathc
                );
            }
            globfree(&globbuf);
            return;
    if (retval == ERR_DB_NOT_FOUND) {
        errtype = "empty directory";
    }
    else if(!retval) {
        for (i=0; i<filesets.items.size(); i++) {
            filenamelist.push_back(filesets.items[i].fileset.name);
        }
        if (config.debug_locality) {
            log_messages.printf(MSG_NORMAL,
                "[locality] build_working_set_namelist(%s): pattern %s has %d matches\n",
                hosttype, pattern, filesets.items.size()
            );
        }
        return;
    }

    log_messages.printf(MSG_CRITICAL,
        "build_working_set_namelist(%s): pattern %s not found (%s)\n", hosttype, pattern, errtype
    );
    globfree(&globbuf);

    return;
}
@@ -439,28 +476,18 @@ static int get_working_set_filename(char *filename, bool slowhost) {
    filenamelist[random_file_num] = filenamelist.back();
    filenamelist.pop_back();

    // locate trailing file name
    //
    std::string slash = "/";
    std::string::size_type last_slash_pos = thisname.rfind(slash);
    if (last_slash_pos == std::string::npos) {
        errtype = "no trailing slash";
    } else {
        // extract file name
        thisname = thisname.substr(last_slash_pos);
        if (thisname.length() < 2) {
    // final check
    if (thisname.length() < 1) {
        errtype = "zero length filename";
    } else {
        thisname = thisname.substr(1);
        strcpy(filename, thisname.c_str());
        if (config.debug_locality) {
            log_messages.printf(MSG_NORMAL,
                "[locality] get_working_set_filename(%s): returning %s\n",
                hosttype, filename
            );
        }
        return 0;
    } else {
        strcpy(filename, thisname.c_str());
        if (config.debug_locality) {
            log_messages.printf(MSG_NORMAL,
                "[locality] get_working_set_filename(%s): returning %s\n",
                hosttype, filename
            );
        }
        return 0;
    }
}
@@ -471,9 +498,25 @@ static int get_working_set_filename(char *filename, bool slowhost) {
}


static void flag_for_possible_removal(char* filename) {
    boinc_touch_file(config.project_path("locality_scheduling/working_set_removal/%s", filename));
    return;
static void flag_for_possible_removal(char* fileset_name) {
    int retval = 0;
    DB_SCHED_TRIGGER trigger;

    if (!retrieve_single_trigger_by_fileset_name(fileset_name, trigger)) {
        // trigger retrieval failed (message logged by previous method)
        return;
    }

    // // FIXME: should we reset these? The old code didn't do any consistency checks...
    // trigger.need_work = false;
    // trigger.work_available = false;
    // trigger.no_work_available = false;

    // set trigger state to working_set_removal
    retval = trigger.update_single_state(DB_SCHED_TRIGGER::state_working_set_removal, true);
    if (retval) {
        log_messages.printf(MSG_CRITICAL, "unable to set working_set_removal state for trigger %s (error: %d)\n", fileset_name, retval);
    }
}

// The client has (or will soon have) the given file.
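Nothing in this commit consumes working_set_removal yet; a project-side cleanup daemon would be expected to pick it up, much as one previously scanned the working_set_removal flag-file directory. A purely hypothetical sketch of such a consumer; the deletion step and the function itself are assumptions, not part of this change.

    #include "boinc_db.h"
    #include "error_numbers.h"

    // Hypothetical cleanup pass: find filesets flagged for working-set removal,
    // retire their data files, then clear the flag.
    static int cleanup_flagged_filesets() {
        DB_FILESET_SCHED_TRIGGER_ITEM_SET flagged;

        int retval = flagged.select_by_name_state(
            NULL, false, DB_SCHED_TRIGGER::state_working_set_removal, true
        );
        if (retval == ERR_DB_NOT_FOUND) return 0;   // nothing to do
        if (retval) return retval;

        for (unsigned int i = 0; i < flagged.items.size(); i++) {
            // ... delete or archive the files of this fileset here ...

            DB_SCHED_TRIGGER trigger;
            if (!trigger.select_unique_by_fileset_name(flagged.items[i].fileset.name)) {
                trigger.update_single_state(DB_SCHED_TRIGGER::state_working_set_removal, false);
            }
        }
        return 0;
    }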