// This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2012 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC.  If not, see <http://www.gnu.org/licenses/>.
#include <cstdio>
#include <cstring>

#include "str_replace.h"
#include "url.h"
#include "client_msgs.h"
#include "client_state.h"
#include "log_flags.h"
#include "result.h"
#include "sandbox.h"
#include "project.h"
// Default constructor: all field initialization is delegated to init()
// so the same reset logic can be reused before re-parsing state.
PROJECT::PROJECT() {
init();
}
void PROJECT::init() {
strcpy(master_url, "");
strcpy(authenticator, "");
strcpy(_project_dir, "");
strcpy(_project_dir_absolute, "");
project_specific_prefs = "";
gui_urls = "";
resource_share = 100;
desired_disk_usage = 0;
for (int i=0; ino_rsc_ams[i] = true;
}
// Mark resource "name" as "don't use, per user preference".
// Unknown resource names are silently ignored.
static void handle_no_rsc_pref(PROJECT* p, const char* name) {
    int idx = rsc_index(name);
    if (idx >= 0) {
        p->no_rsc_pref[idx] = true;
    }
}
// Mark resource "name" as "project has no apps for it".
// Unknown resource names are silently ignored.
static void handle_no_rsc_apps(PROJECT* p, const char* name) {
    int idx = rsc_index(name);
    if (idx >= 0) {
        p->no_rsc_apps[idx] = true;
    }
}
// Parse an element of the form
//   <name>RSC</name> <value>N</value> ... end_tag
// setting rsc_type (via rsc_index) and value.
// Returns true only if both a GPU resource and a value were found
// (rsc_type > 0: CPU backoffs are stored via separate cpu_backoff_* tags).
//
static bool parse_rsc_param(XML_PARSER& xp, const char* end_tag, int& rsc_type, double& value) {
    char name[256];
    bool val_found = false;
    rsc_type = -1;
    while (!xp.get_tag()) {
        if (xp.match_tag(end_tag)) {
            return (rsc_type > 0 && val_found);
        }
        if (xp.parse_str("name", name, sizeof(name))) {
            rsc_type = rsc_index(name);
            continue;
        }
        // BUGFIX: was xp.parse_double("rsc_type", value) — the writer emits
        // <value>, so the numeric payload was never parsed and the function
        // always returned false.
        if (xp.parse_double("value", value)) {
            val_found = true;
        }
    }
    return false;
}
// Parse project fields from client_state.xml.
// Returns 0 once the closing </project> tag is seen;
// ERR_XML_PARSE if the input ends first.
//
int PROJECT::parse_state(XML_PARSER& xp) {
    char buf[256];
    std::string sched_url, stemp;
    int retval, rt;
    double x;
    bool btemp;
    init();
    while (!xp.get_tag()) {
        if (xp.match_tag("/project")) {
            // old state files lack cpid_time; approximate it
            if (cpid_time == 0) {
                cpid_time = user_create_time;
            }
            if (dont_use_dcf) {
                duration_correction_factor = 1;
            }
            return 0;
        }
        if (xp.parse_string("scheduler_url", sched_url)) {
            scheduler_urls.push_back(sched_url);
            continue;
        }
        if (xp.parse_str("master_url", master_url, sizeof(master_url))) continue;
        if (xp.parse_str("project_name", project_name, sizeof(project_name))) continue;
        if (xp.parse_str("symstore", symstore, sizeof(symstore))) continue;
        if (xp.parse_str("user_name", user_name, sizeof(user_name))) continue;
        if (xp.parse_str("team_name", team_name, sizeof(team_name))) continue;
        if (xp.parse_str("host_venue", host_venue, sizeof(host_venue))) continue;
        if (xp.parse_str("email_hash", email_hash, sizeof(email_hash))) continue;
        if (xp.parse_str("cross_project_id", cross_project_id, sizeof(cross_project_id))) continue;
        if (xp.parse_double("cpid_time", cpid_time)) continue;
        if (xp.parse_double("user_total_credit", user_total_credit)) continue;
        if (xp.parse_double("user_expavg_credit", user_expavg_credit)) continue;
        if (xp.parse_double("user_create_time", user_create_time)) continue;
        if (xp.parse_int("rpc_seqno", rpc_seqno)) continue;
        if (xp.parse_int("userid", userid)) continue;
        if (xp.parse_int("teamid", teamid)) continue;
        if (xp.parse_int("hostid", hostid)) continue;
        if (xp.parse_double("host_total_credit", host_total_credit)) continue;
        if (xp.parse_double("host_expavg_credit", host_expavg_credit)) continue;
        if (xp.parse_double("host_create_time", host_create_time)) continue;
        if (xp.match_tag("code_sign_key")) {
            // BUGFIX: the end tag was "" (lost to markup stripping), so
            // copying would never stop at the closing tag.
            retval = copy_element_contents(
                xp.f->f,
                "</code_sign_key>",
                code_sign_key,
                sizeof(code_sign_key)
            );
            if (retval) return retval;
            strip_whitespace(code_sign_key);
            continue;
        }
        if (xp.parse_int("nrpc_failures", nrpc_failures)) continue;
        if (xp.parse_int("master_fetch_failures", master_fetch_failures)) continue;
        if (xp.parse_double("min_rpc_time", min_rpc_time)) continue;
        if (xp.parse_bool("master_url_fetch_pending", master_url_fetch_pending)) continue;
        if (xp.parse_int("sched_rpc_pending", sched_rpc_pending)) continue;
        if (xp.parse_double("next_rpc_time", next_rpc_time)) continue;
        if (xp.parse_bool("trickle_up_pending", trickle_up_pending)) continue;
        if (xp.parse_int("send_time_stats_log", send_time_stats_log)) continue;
        if (xp.parse_int("send_job_log", send_job_log)) continue;
        if (xp.parse_bool("send_full_workload", send_full_workload)) continue;
        if (xp.parse_bool("dont_use_dcf", dont_use_dcf)) continue;
        if (xp.parse_bool("non_cpu_intensive", non_cpu_intensive)) continue;
        if (xp.parse_bool("verify_files_on_app_start", verify_files_on_app_start)) continue;
        if (xp.parse_bool("suspended_via_gui", suspended_via_gui)) continue;
        if (xp.parse_bool("dont_request_more_work", dont_request_more_work)) continue;
        if (xp.parse_bool("detach_when_done", detach_when_done)) continue;
        if (xp.parse_bool("ended", ended)) continue;
        if (xp.parse_double("rec", pwf.rec)) continue;
        if (xp.parse_double("rec_time", pwf.rec_time)) continue;
        if (xp.parse_double("cpu_backoff_interval", rsc_pwf[0].backoff_interval)) continue;
        if (xp.parse_double("cpu_backoff_time", rsc_pwf[0].backoff_time)) {
            // sanity-cap CPU backoff at 28 days from now
            if (rsc_pwf[0].backoff_time > gstate.now + 28*SECONDS_PER_DAY) {
                rsc_pwf[0].backoff_time = gstate.now + 28*SECONDS_PER_DAY;
            }
            continue;
        }
        if (xp.match_tag("rsc_backoff_interval")) {
            if (parse_rsc_param(xp, "/rsc_backoff_interval", rt, x)) {
                rsc_pwf[rt].backoff_interval = x;
            }
            continue;
        }
        if (xp.match_tag("rsc_backoff_time")) {
            if (parse_rsc_param(xp, "/rsc_backoff_time", rt, x)) {
                rsc_pwf[rt].backoff_time = x;
            }
            continue;
        }
        if (xp.parse_double("resource_share", resource_share)) continue;
        // not authoritative
        if (xp.parse_double("duration_correction_factor", duration_correction_factor)) continue;
        if (xp.parse_bool("attached_via_acct_mgr", attached_via_acct_mgr)) continue;
        if (xp.parse_bool("no_cpu_apps", btemp)) {
            if (btemp) handle_no_rsc_apps(this, "CPU");
            continue;
        }
        // deprecated GPU-specific tags; map to the generic per-resource flags
        if (xp.parse_bool("no_cuda_apps", btemp)) {
            if (btemp) handle_no_rsc_apps(this, GPU_TYPE_NVIDIA);
            continue;
        }
        if (xp.parse_bool("no_ati_apps", btemp)) {
            if (btemp) handle_no_rsc_apps(this, GPU_TYPE_ATI);
            continue;
        }
        if (xp.parse_str("no_rsc_apps", buf, sizeof(buf))) {
            handle_no_rsc_apps(this, buf);
            continue;
        }
        if (xp.parse_bool("no_cpu_ams", btemp)) {
            if (btemp) handle_no_rsc_ams(this, "CPU");
            continue;
        }
        if (xp.parse_bool("no_cuda_ams", btemp)) {
            if (btemp) handle_no_rsc_ams(this, GPU_TYPE_NVIDIA);
            continue;
        }
        if (xp.parse_bool("no_ati_ams", btemp)) {
            if (btemp) handle_no_rsc_ams(this, GPU_TYPE_ATI);
            continue;
        }
        if (xp.parse_bool("no_intel_gpu_ams", btemp)) {
            if (btemp) handle_no_rsc_ams(this, GPU_TYPE_INTEL);
            continue;
        }
        if (xp.parse_str("no_rsc_ams", buf, sizeof(buf))) {
            handle_no_rsc_ams(this, buf);
            continue;
        }
        if (xp.parse_str("no_rsc_pref", buf, sizeof(buf))) {
            handle_no_rsc_pref(this, buf);
            continue;
        }
        // backwards compat - old state files had ams_resource_share = 0
        if (xp.parse_double("ams_resource_share_new", ams_resource_share)) continue;
        if (xp.parse_double("ams_resource_share", x)) {
            if (x > 0) ams_resource_share = x;
            continue;
        }
        // consumed but ignored (transient at parse time)
        if (xp.parse_bool("scheduler_rpc_in_progress", btemp)) continue;
        if (xp.parse_bool("use_symlinks", use_symlinks)) continue;
        if (xp.parse_bool("anonymous_platform", btemp)) continue;
        if (xp.parse_string("trickle_up_url", stemp)) {
            trickle_up_ops.push_back(new TRICKLE_UP_OP(stemp));
            continue;
        }
        if (xp.parse_double("desired_disk_usage", desired_disk_usage)) continue;
#ifdef SIM
        if (xp.match_tag("available")) {
            available.parse(xp, "/available");
            continue;
        }
#endif
        if (log_flags.unparsed_xml) {
            msg_printf(0, MSG_INFO,
                "[unparsed_xml] PROJECT::parse_state(): unrecognized: %s",
                xp.parsed_tag
            );
        }
        xp.skip_unexpected();
    }
    return ERR_XML_PARSE;
}
// Write project information to client state file or GUI RPC reply.
// gui_rpc == true adds GUI-only items and omits client-internal ones.
// NOTE(review): the XML tags in these format strings were destroyed by
// markup stripping; reconstructed to match the tag names accepted by
// parse_state() above — verify against the upstream state-file format.
//
int PROJECT::write_state(MIOFILE& out, bool gui_rpc) {
    unsigned int i;
    char un[2048], tn[2048];
    out.printf(
        "<project>\n"
    );
    // user/team names may contain XML-special characters; escape them
    xml_escape(user_name, un, sizeof(un));
    xml_escape(team_name, tn, sizeof(tn));
    out.printf(
        "    <master_url>%s</master_url>\n"
        "    <project_name>%s</project_name>\n"
        "    <symstore>%s</symstore>\n"
        "    <user_name>%s</user_name>\n"
        "    <team_name>%s</team_name>\n"
        "    <host_venue>%s</host_venue>\n"
        "    <email_hash>%s</email_hash>\n"
        "    <cross_project_id>%s</cross_project_id>\n"
        "    <cpid_time>%f</cpid_time>\n"
        "    <user_total_credit>%f</user_total_credit>\n"
        "    <user_expavg_credit>%f</user_expavg_credit>\n"
        "    <user_create_time>%f</user_create_time>\n"
        "    <rpc_seqno>%d</rpc_seqno>\n"
        "    <userid>%d</userid>\n"
        "    <teamid>%d</teamid>\n"
        "    <hostid>%d</hostid>\n"
        "    <host_total_credit>%f</host_total_credit>\n"
        "    <host_expavg_credit>%f</host_expavg_credit>\n"
        "    <host_create_time>%f</host_create_time>\n"
        "    <nrpc_failures>%d</nrpc_failures>\n"
        "    <master_fetch_failures>%d</master_fetch_failures>\n"
        "    <min_rpc_time>%f</min_rpc_time>\n"
        "    <next_rpc_time>%f</next_rpc_time>\n"
        "    <rec>%f</rec>\n"
        "    <rec_time>%f</rec_time>\n"
        "    <resource_share>%f</resource_share>\n"
        "    <desired_disk_usage>%f</desired_disk_usage>\n"
        "    <duration_correction_factor>%f</duration_correction_factor>\n"
        "    <sched_rpc_pending>%d</sched_rpc_pending>\n"
        "    <send_time_stats_log>%d</send_time_stats_log>\n"
        "    <send_job_log>%d</send_job_log>\n"
        "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
        master_url,
        project_name,
        symstore,
        un,
        tn,
        host_venue,
        email_hash,
        cross_project_id,
        cpid_time,
        user_total_credit,
        user_expavg_credit,
        user_create_time,
        rpc_seqno,
        userid,
        teamid,
        hostid,
        host_total_credit,
        host_expavg_credit,
        host_create_time,
        nrpc_failures,
        master_fetch_failures,
        min_rpc_time,
        next_rpc_time,
        pwf.rec,
        pwf.rec_time,
        resource_share,
        desired_disk_usage,
        duration_correction_factor,
        sched_rpc_pending,
        send_time_stats_log,
        send_job_log,
        anonymous_platform?"    <anonymous_platform/>\n":"",
        master_url_fetch_pending?"    <master_url_fetch_pending/>\n":"",
        trickle_up_pending?"    <trickle_up_pending/>\n":"",
        send_full_workload?"    <send_full_workload/>\n":"",
        dont_use_dcf?"    <dont_use_dcf/>\n":"",
        non_cpu_intensive?"    <non_cpu_intensive/>\n":"",
        verify_files_on_app_start?"    <verify_files_on_app_start/>\n":"",
        suspended_via_gui?"    <suspended_via_gui/>\n":"",
        dont_request_more_work?"    <dont_request_more_work/>\n":"",
        detach_when_done?"    <detach_when_done/>\n":"",
        ended?"    <ended/>\n":"",
        attached_via_acct_mgr?"    <attached_via_acct_mgr/>\n":"",
        (this == gstate.scheduler_op->cur_proj)?"    <scheduler_rpc_in_progress/>\n":"",
        use_symlinks?"    <use_symlinks/>\n":""
    );
    // per-resource work-fetch backoffs and exclusion flags
    for (int j=0; j<coprocs.n_rsc; j++) {
        out.printf(
            "    <rsc_backoff_time>\n"
            "        <name>%s</name>\n"
            "        <value>%f</value>\n"
            "    </rsc_backoff_time>\n"
            "    <rsc_backoff_interval>\n"
            "        <name>%s</name>\n"
            "        <value>%f</value>\n"
            "    </rsc_backoff_interval>\n",
            rsc_name(j), rsc_pwf[j].backoff_time,
            rsc_name(j), rsc_pwf[j].backoff_interval
        );
        if (no_rsc_ams[j]) {
            out.printf("    <no_rsc_ams>%s</no_rsc_ams>\n", rsc_name(j));
        }
        if (no_rsc_apps[j]) {
            out.printf("    <no_rsc_apps>%s</no_rsc_apps>\n", rsc_name(j));
        }
        if (no_rsc_pref[j]) {
            out.printf("    <no_rsc_pref>%s</no_rsc_pref>\n", rsc_name(j));
        }
        // GUI only: all instances of this GPU are excluded by config
        if (j>0 && gui_rpc && (rsc_pwf[j].ncoprocs_excluded == rsc_work_fetch[j].ninstances)) {
            out.printf("    <no_rsc_config>%s</no_rsc_config>\n", rsc_name(j));
        }
    }
    if (ams_resource_share >= 0) {
        out.printf("    <ams_resource_share_new>%f</ams_resource_share_new>\n",
            ams_resource_share
        );
    }
    if (gui_rpc) {
        out.printf(
            "%s"
            "    <sched_priority>%f</sched_priority>\n"
            "    <last_rpc_time>%f</last_rpc_time>\n"
            "    <project_files_downloaded_time>%f</project_files_downloaded_time>\n",
            gui_urls.c_str(),
            sched_priority,
            last_rpc_time,
            project_files_downloaded_time
        );
        if (download_backoff.next_xfer_time > gstate.now) {
            out.printf(
                "    <download_backoff>%f</download_backoff>\n",
                download_backoff.next_xfer_time - gstate.now
            );
        }
        if (upload_backoff.next_xfer_time > gstate.now) {
            out.printf(
                "    <upload_backoff>%f</upload_backoff>\n",
                upload_backoff.next_xfer_time - gstate.now
            );
        }
        if (strlen(host_venue)) {
            out.printf("    <venue>%s</venue>\n", host_venue);
        }
        out.printf("    <project_dir>%s</project_dir>\n", project_dir_absolute());
    } else {
        // items written only to the client state file
        for (i=0; i<scheduler_urls.size(); i++) {
            out.printf(
                "    <scheduler_url>%s</scheduler_url>\n",
                scheduler_urls[i].c_str()
            );
        }
        if (strlen(code_sign_key)) {
            out.printf(
                "    <code_sign_key>\n%s\n</code_sign_key>\n", code_sign_key
            );
        }
        for (i=0; i<trickle_up_ops.size(); i++) {
            TRICKLE_UP_OP* t = trickle_up_ops[i];
            out.printf(
                "    <trickle_up_url>%s</trickle_up_url>\n",
                t->url.c_str()
            );
        }
    }
    out.printf(
        "</project>\n"
    );
    return 0;
}
// Some project data is stored in account file, other in client_state.xml.
// Copy fields that are stored in client_state.xml from "p" into "this".
//
void PROJECT::copy_state_fields(PROJECT& p) {
    scheduler_urls = p.scheduler_urls;
    safe_strcpy(project_name, p.project_name);
    safe_strcpy(user_name, p.user_name);
    safe_strcpy(team_name, p.team_name);
    safe_strcpy(host_venue, p.host_venue);
    safe_strcpy(email_hash, p.email_hash);
    safe_strcpy(cross_project_id, p.cross_project_id);
    user_total_credit = p.user_total_credit;
    user_expavg_credit = p.user_expavg_credit;
    user_create_time = p.user_create_time;
    cpid_time = p.cpid_time;
    rpc_seqno = p.rpc_seqno;
    userid = p.userid;
    teamid = p.teamid;
    hostid = p.hostid;
    host_total_credit = p.host_total_credit;
    host_expavg_credit = p.host_expavg_credit;
    host_create_time = p.host_create_time;
    nrpc_failures = p.nrpc_failures;
    master_fetch_failures = p.master_fetch_failures;
    min_rpc_time = p.min_rpc_time;
    next_rpc_time = p.next_rpc_time;
    master_url_fetch_pending = p.master_url_fetch_pending;
    sched_rpc_pending = p.sched_rpc_pending;
    trickle_up_pending = p.trickle_up_pending;
    safe_strcpy(code_sign_key, p.code_sign_key);
    // NOTE(review): the text between here and the ams_resource_share
    // check was lost to markup stripping; reconstructed as copying the
    // per-resource flags — verify against upstream.
    for (int i=0; i<MAX_RSC; i++) {
        no_rsc_pref[i] = p.no_rsc_pref[i];
        no_rsc_apps[i] = p.no_rsc_apps[i];
        no_rsc_ams[i] = p.no_rsc_ams[i];
    }
    ams_resource_share = p.ams_resource_share;
    if (ams_resource_share >= 0) {
        // account manager override takes precedence
        resource_share = ams_resource_share;
    }
    desired_disk_usage = p.desired_disk_usage;
    use_symlinks = p.use_symlinks;
}
// Write project credit statistics to a GUI RPC reply.
// Old entries are pruned first via trim_statistics().
// NOTE(review): XML tags and the vector element type were lost to markup
// stripping; reconstructed from the DAILY_STATS fields referenced below.
//
int PROJECT::write_statistics(MIOFILE& out) {
    trim_statistics();
    out.printf(
        "<project_statistics>\n"
        "    <master_url>%s</master_url>\n",
        master_url
    );
    for (std::vector<DAILY_STATS>::iterator i=statistics.begin();
        i!=statistics.end(); ++i
    ) {
        out.printf(
            "    <daily_statistics>\n"
            "        <day>%f</day>\n"
            "        <user_total_credit>%f</user_total_credit>\n"
            "        <user_expavg_credit>%f</user_expavg_credit>\n"
            "        <host_total_credit>%f</host_total_credit>\n"
            "        <host_expavg_credit>%f</host_expavg_credit>\n"
            "    </daily_statistics>\n",
            i->day,
            i->user_total_credit,
            i->user_expavg_credit,
            i->host_total_credit,
            i->host_expavg_credit
        );
    }
    out.printf(
        "</project_statistics>\n"
    );
    return 0;
}
// Suspend this project (user request via GUI):
// set the flag, then ask the client to re-plan CPU scheduling
// and work fetch.
void PROJECT::suspend() {
suspended_via_gui = true;
gstate.request_schedule_cpus("project suspended");
gstate.request_work_fetch("project suspended");
}
// Resume this project (user request via GUI):
// clear the flag, then ask the client to re-plan CPU scheduling
// and work fetch.
void PROJECT::resume() {
suspended_via_gui = false;
gstate.request_schedule_cpus("project resumed");
gstate.request_work_fetch("project resumed");
}
void PROJECT::abort_not_started() {
for (unsigned int i=0; iproject != this) continue;
if (rp->is_not_started()) {
rp->abort_inactive(EXIT_ABORTED_VIA_GUI);
}
}
}
void PROJECT::get_task_durs(double& not_started_dur, double& in_progress_dur) {
not_started_dur = 0;
in_progress_dur = 0;
for (unsigned int i=0; iproject != this) continue;
double d = rp->estimated_runtime_remaining();
d /= gstate.time_stats.availability_frac(rp->avp->gpu_usage.rsc_type);
if (rp->is_not_started()) {
not_started_dur += d;
} else {
in_progress_dur += d;
}
}
}
// Pick one of the project's scheduler URLs.
// "r" (expected in [0,1)) adds a per-client random offset so load
// is spread across schedulers; "index" walks through them in order.
const char* PROJECT::get_scheduler_url(int index, double r) {
    const int count = (int)scheduler_urls.size();
    const int offset = (int)(r * count);
    const int pick = (index + offset) % count;
    return scheduler_urls[pick].c_str();
}
// delete current sym links.
// This is done when parsing scheduler reply,
// to ensure that we get rid of sym links for
// project files no longer in use
//
void PROJECT::delete_project_file_symlinks() {
unsigned int i;
char path[MAXPATHLEN];
for (i=0; i::iterator fref_iter;
fref_iter = project_files.begin();
while (fref_iter != project_files.end()) {
FILE_REF& fref = *fref_iter;
fip = gstate.lookup_file_info(this, fref.file_name);
if (!fip) {
msg_printf(this, MSG_INTERNAL_ERROR,
"project file refers to non-existent %s", fref.file_name
);
fref_iter = project_files.erase(fref_iter);
continue;
}
fref.file_info = fip;
fip->is_project_file = true;
fref_iter++;
}
}
void PROJECT::create_project_file_symlinks() {
for (unsigned i=0; iproject == this && fip->is_project_file && fip->status == FILE_PRESENT) {
write_symlink_for_project_file(fip);
}
}
}
// Write this project's file list as a <project_files> element;
// writes nothing if there are no project files.
// (XML tags and loop reconstructed; lost to markup stripping.)
//
void PROJECT::write_project_files(MIOFILE& f) {
    unsigned int i;
    if (!project_files.size()) return;
    f.printf("<project_files>\n");
    for (i=0; i<project_files.size(); i++) {
        FILE_REF& fref = project_files[i];
        fref.write(f);
    }
    f.printf("</project_files>\n");
}
// write symlinks for project files.
// Note: it's conceivable that one physical file
// has several logical names, so try them all.
// (Loop header reconstructed; lost to markup stripping.)
//
int PROJECT::write_symlink_for_project_file(FILE_INFO* fip) {
    char link_path[MAXPATHLEN], file_path[MAXPATHLEN];
    unsigned int i;
    for (i=0; i<project_files.size(); i++) {
        FILE_REF& fref = project_files[i];
        if (fref.file_info != fip) continue;
        sprintf(link_path, "%s/%s", project_dir(), fref.open_name);
        sprintf(file_path, "%s/%s", project_dir(), fip->name);
        make_soft_link(this, link_path, file_path);
    }
    return 0;
}
// a project file download just finished.
// If it's the last one, update project_files_downloaded_time
//
void PROJECT::update_project_files_downloaded_time() {
unsigned int i;
for (i=0; istatus != FILE_PRESENT) continue;
}
project_files_downloaded_time = gstate.now;
}
// Return true if some download of this project is backed off,
// either at the project level or for an individual file transfer.
// (Loop bound reconstructed; lost to markup stripping.)
//
bool PROJECT::some_download_stalled() {
#ifndef SIM
    unsigned int i;
    if (!download_backoff.ok_to_transfer()) return true;
    for (i=0; i<gstate.pers_file_xfers->pers_file_xfers.size(); i++) {
        PERS_FILE_XFER* pfx = gstate.pers_file_xfers->pers_file_xfers[i];
        if (pfx->fip->project != this) continue;
        if (pfx->is_upload) continue;
        if (pfx->next_request_time > gstate.now) return true;
    }
#endif
    return false;
}
bool PROJECT::runnable(int rsc_type) {
if (suspended_via_gui) return false;
for (unsigned int i=0; iproject != this) continue;
if (rsc_type != RSC_TYPE_ANY) {
if (rp->avp->gpu_usage.rsc_type != rsc_type) {
continue;
}
}
if (rp->runnable()) return true;
}
return false;
}
// Return true if any active file transfer of this project is an upload.
// (Loop bound reconstructed; lost to markup stripping.)
//
bool PROJECT::uploading() {
    for (unsigned int i=0; i<gstate.file_xfers->file_xfers.size(); i++) {
        FILE_XFER& fx = *gstate.file_xfers->file_xfers[i];
        if (fx.fip->project == this && fx.is_upload) {
            return true;
        }
    }
    return false;
}
bool PROJECT::downloading() {
if (suspended_via_gui) return false;
for (unsigned int i=0; iproject != this) continue;
if (rp->downloading()) return true;
}
return false;
}
bool PROJECT::has_results() {
for (unsigned i=0; iproject == this) return true;
}
return false;
}
bool PROJECT::some_result_suspended() {
unsigned int i;
for (i=0; iproject != this) continue;
if (rp->suspended_via_gui) return true;
}
return false;
}
// Return true if it's OK to ask this project for work:
// not suspended, no pending master fetch, not within an RPC backoff,
// "no more work" not set, and the client isn't shutting down.
bool PROJECT::can_request_work() {
    const bool blocked =
        suspended_via_gui
        || master_url_fetch_pending
        || (min_rpc_time > gstate.now)
        || dont_request_more_work
        || gstate.in_abort_sequence;
    return !blocked;
}
// Return true if this project is running, could fetch work,
// or is downloading task files (i.e. may have work soon).
bool PROJECT::potentially_runnable() {
    return runnable(RSC_TYPE_ANY)
        || can_request_work()
        || downloading();
}
// Return true if this project is runnable now or will be
// as soon as its downloads finish.
bool PROJECT::nearly_runnable() {
    return runnable(RSC_TYPE_ANY) || downloading();
}
// Defer scheduler RPCs to this project until future_time.
// Only ever extends the deferral; an earlier time is ignored.
void PROJECT::set_min_rpc_time(double future_time, const char* reason) {
    if (future_time <= min_rpc_time) return;
    min_rpc_time = future_time;
    possibly_backed_off = true;
    if (!log_flags.sched_op_debug) return;
    msg_printf(this, MSG_INFO,
        "[sched_op] Deferring communication for %s",
        timediff_format(min_rpc_time - gstate.now).c_str()
    );
    msg_printf(this, MSG_INFO, "[sched_op] Reason: %s\n", reason);
}
// Return true if we should not contact the project yet
// (still inside the RPC deferral window).
//
bool PROJECT::waiting_until_min_rpc_time() {
    return min_rpc_time > gstate.now;
}
void PROJECT::trim_statistics() {
double cutoff = dday() - config.save_stats_days*86400;
// delete old stats; fill in the gaps if some days missing
//
while (!statistics.empty()) {
DAILY_STATS& ds = statistics[0];
if (ds.day >= cutoff) {
break;
}
if (statistics.size() > 1) {
DAILY_STATS& ds2 = statistics[1];
if (ds2.day <= cutoff) {
statistics.erase(statistics.begin());
} else {
ds.day = cutoff;
break;
}
} else {
ds.day = cutoff;
break;
}
}
}
// Return this project's directory (relative path), computing and
// caching it on first use from the escaped master URL.
//
const char* PROJECT::project_dir() {
    if (_project_dir[0] == 0) {
        char buf[1024];
        escape_project_url(master_url, buf);
        // snprintf instead of sprintf: guard the member buffer
        // against overflow from a pathological master URL
        snprintf(_project_dir, sizeof(_project_dir), "%s/%s", PROJECTS_DIR, buf);
    }
    return _project_dir;
}
// Return this project's directory as an absolute path,
// computing and caching it on first use.
const char* PROJECT::project_dir_absolute() {
    if ('\0' == _project_dir_absolute[0]) {
        relative_to_absolute(project_dir(), _project_dir_absolute);
    }
    return _project_dir_absolute;
}
// If no_rsc_apps flags are set for all resource types, something's wrong;
// clear them, and fall back to per-resource backoff.
// Otherwise we might never contact the project again
//
void PROJECT::check_no_rsc_apps() {
for (int i=0; iproject != this) continue;
no_rsc_apps[avp->gpu_usage.rsc_type] = false;
}
}