// This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC.  If not, see <http://www.gnu.org/licenses/>.

/// db_dump: dump database views in XML format
// see http://boinc.berkeley.edu/trac/wiki/DbDump
//
// Note: this program is way more configurable than it needs to be.
// All projects export stats in the same format,
// as described in the default db_dump_spec.xml that is created for you.
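//
// An illustrative spec file (the tags match the parsers below;
// the values are made-up examples, not project defaults):
//
//   <boinc_db_dump_spec>
//       <output_dir>../stats_tmp</output_dir>
//       <final_output_dir>../stats</final_output_dir>
//       <enumeration>
//           <table>user</table>
//           <filename>user_total_credit</filename>
//           <sort>total_credit</sort>
//           <output>
//               <recs_per_file>1000</recs_per_file>
//               <compression>gzip</compression>
//           </output>
//       </enumeration>
//   </boinc_db_dump_spec>
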
#include "config.h"
#include <cstdio>
#include <cstring>
#include <cstdlib>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string>
#include <vector>

#include "boinc_db.h"
#include "filesys.h"
#include "util.h"
#include "str_util.h"
#include "str_replace.h"
#include "error_numbers.h"
#include "md5_file.h"
#include "parse.h"
#include "svn_version.h"

#include "sched_config.h"
#include "sched_util.h"
#include "sched_msgs.h"

using std::string;
using std::vector;

#define LOCKFILE "db_dump.out"

#define COMPRESSION_NONE    0
#define COMPRESSION_GZIP    1
#define COMPRESSION_ZIP     2

#define SORT_NONE           0
#define SORT_ID             1
#define SORT_TOTAL_CREDIT   2
#define SORT_EXPAVG_CREDIT  3

#define TABLE_USER  0
#define TABLE_TEAM  1
#define TABLE_HOST  2

// must match the above
//
const char* table_name[3] = {"user", "team", "host"};
const char* tag_name[3] = {"users", "teams", "hosts"};

int nusers, nhosts, nteams;
double total_credit;

struct OUTPUT {
    int recs_per_file;
    bool detail;
    int compression;
    class ZFILE* zfile;
    class NUMBERED_ZFILE* nzfile;
    int parse(FILE*);
};

struct ENUMERATION {
    int table;
    int sort;
    char filename[256];
    vector<OUTPUT> outputs;
    int parse(FILE*);
    int make_it_happen(char*);
};

struct DUMP_SPEC {
    char output_dir[256];
    char final_output_dir[256];
    char archive_dir[256];
    vector<ENUMERATION> enumerations;
    int parse(FILE*);
};

int OUTPUT::parse(FILE* in) {
    char buf[256], buf2[256];

    recs_per_file = 0;
    detail = false;
    compression = COMPRESSION_NONE;
    zfile = 0;
    nzfile = 0;
    while (fgets(buf, 256, in)) {
        if (match_tag(buf, "</output>")) return 0;
        if (parse_int(buf, "<recs_per_file>", recs_per_file)) continue;
        if (match_tag(buf, "<detail/>")) {
            detail = true;
            continue;
        }
        if (parse_str(buf, "<compression>", buf2, sizeof(buf2))) {
            if (!strcmp(buf2, "gzip")) {
                compression = COMPRESSION_GZIP;
            } else if (!strcmp(buf2, "zip")) {
                compression = COMPRESSION_ZIP;
            } else {
                log_messages.printf(MSG_CRITICAL,
                    "unrecognized compression type: %s", buf
                );
            }
            continue;
        }
        log_messages.printf(MSG_CRITICAL,
            "OUTPUT::parse: unrecognized: %s", buf
        );
    }
    return ERR_XML_PARSE;
}

int ENUMERATION::parse(FILE* in) {
    char buf[256], buf2[256];
    int retval, i;

    table = -1;
    sort = SORT_NONE;
    strcpy(filename, "");
    while (fgets(buf, 256, in)) {
        if (match_tag(buf, "</enumeration>")) {
            if (table == -1) return ERR_XML_PARSE;
            if (sort == -1) return ERR_XML_PARSE;
            if (!strlen(filename)) return ERR_XML_PARSE;
            return 0;
        }
        if (match_tag(buf, "<output>")) {
            OUTPUT output;
            retval = output.parse(in);
            if (!retval) outputs.push_back(output);
        }
        if (parse_str(buf, "<filename>", filename, sizeof(filename))) {
            continue;
        }
        if (parse_str(buf, "<table>", buf2, sizeof(buf2))) {
            for (i=0; i<3; i++) {
                if (!strcmp(buf2, table_name[i])) {
                    table = i;
                    break;
                }
            }
        }
        if (parse_str(buf, "<sort>", buf2, sizeof(buf2))) {
            if (!strcmp(buf2, "id")) sort = SORT_ID;
            if (!strcmp(buf2, "total_credit")) sort = SORT_TOTAL_CREDIT;
            if (!strcmp(buf2, "expavg_credit")) sort = SORT_EXPAVG_CREDIT;
        }
    }
    return ERR_XML_PARSE;
}

int DUMP_SPEC::parse(FILE* in) {
    char buf[256];
    int retval;

    strcpy(output_dir, "");
    strcpy(final_output_dir, "");
    strcpy(archive_dir, "");
    while (fgets(buf, 256, in)) {
        if (match_tag(buf, "</boinc_db_dump_spec>")) {
            if (!strlen(output_dir)) return ERR_XML_PARSE;
            if (!strlen(final_output_dir)) return ERR_XML_PARSE;
            return 0;
        }
        if (match_tag(buf, "<enumeration>")) {
            ENUMERATION e;
            retval = e.parse(in);
            if (!retval) enumerations.push_back(e);
        }
        if (parse_str(buf, "<output_dir", output_dir, sizeof(output_dir))) {
            continue;
        }
        if (parse_str(buf, "<final_output_dir", final_output_dir, sizeof(final_output_dir))) {
            continue;
        }
        if (parse_str(buf, "<archive_dir", archive_dir, sizeof(archive_dir))) {
            continue;
        }
    }
    return ERR_XML_PARSE;
}

// class that automatically compresses on close
//
class ZFILE {
protected:
    string tag;     // enclosing XML tag
    char current_path[MAXPATHLEN];
    int compression;
public:
    FILE* f;
    ZFILE(string tag_, int comp): tag(tag_), compression(comp), f(0) {}
    ~ZFILE() { close(); }

    void open(const char* filename) {
        close();

        f = fopen(filename, "w");
        if (!f) {
            log_messages.printf(MSG_CRITICAL,
                "Couldn't open %s for output\n", filename
            );
            exit(1);    // writing to a null FILE* below would crash
        }
        fprintf(f,
            "<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n<%s>\n", tag.c_str()
        );
        safe_strcpy(current_path, filename);
    }

    void open_num(const char* filename, int filenum) {
        char buf[256];
        sprintf(buf, "%s_%d", filename, filenum);
        open(buf);
    }

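    // write the closing XML tag, then (if requested) compress
    // the finished file by shelling out to zip or gzip
    //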
    void close() {
        char buf[256];
        int retval;
        if (f) {
            fprintf(f, "</%s>\n", tag.c_str());
            fclose(f);
            switch(compression) {
            case COMPRESSION_ZIP:
                sprintf(buf, "zip -q %s", current_path);
                retval = system(buf);
                if (retval) {
                    log_messages.printf(MSG_CRITICAL,
                        "%s failed: %s\n", buf, boincerror(retval)
                    );
                    exit(retval);
                }
                break;
            case COMPRESSION_GZIP:
                sprintf(buf, "gzip -fq %s", current_path);
                retval = system(buf);
                if (retval) {
                    log_messages.printf(MSG_CRITICAL,
                        "%s failed: %s\n", buf, boincerror(retval)
                    );
                    exit(retval);
                }
                break;
            }
            f = 0;
        }
    }
};

// class that automatically opens a new file every N IDs
//
class NUMBERED_ZFILE : public ZFILE {
    const char* filename_base;
    int nids_per_file;
    int last_filenum;
public:
    NUMBERED_ZFILE(string tag_, int comp, const char* fb, int nids_per_file_)
        : ZFILE(tag_, comp),
        filename_base(fb),
        nids_per_file(nids_per_file_),
        last_filenum(-1)
    {}

    void set_id(int);
};

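// open the output file that covers the given ID,
// if it's not open already
//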
void NUMBERED_ZFILE::set_id(int id) {
    int filenum = id/nids_per_file;
    if (!f || (filenum != last_filenum)) {
        open_num(filename_base, filenum);
        last_filenum = filenum;
    }
}

void write_host(HOST& host, FILE* f, bool detail) {
    int retval;
    char p_vendor[2048], p_model[2048], os_name[2048], os_version[2048];

    xml_escape(host.p_vendor, p_vendor, sizeof(p_vendor));
    xml_escape(host.p_model, p_model, sizeof(p_model));
    xml_escape(host.os_name, os_name, sizeof(os_name));
    xml_escape(host.os_version, os_version, sizeof(os_version));
    fprintf(f,
        "<host>\n"
        " <id>%d</id>\n",
        host.id
    );
    if (detail) {
        DB_USER user;
        retval = user.lookup_id(host.userid);
        if (retval) {
            log_messages.printf(MSG_CRITICAL,
                "user lookup of user %d for host %d: %s\n",
                host.userid, host.id, boincerror(retval)
            );
        } else {
            if (user.show_hosts) {
                fprintf(f,
                    " <userid>%d</userid>\n",
                    host.userid
                );
            }
        }
    }
    fprintf(f,
        " <total_credit>%f</total_credit>\n"
        " <expavg_credit>%f</expavg_credit>\n"
        " <expavg_time>%f</expavg_time>\n"
        " <p_vendor>%s</p_vendor>\n"
        " <p_model>%s</p_model>\n"
        " <os_name>%s</os_name>\n"
        " <os_version>%s</os_version>\n",
        host.total_credit,
        host.expavg_credit,
        host.expavg_time,
        p_vendor,
        p_model,
        os_name,
        os_version
    );

    // host.serialnum stores coprocessor description
    //
    if (strlen(host.serialnum)) {
        char serialnum[1024];
        xml_escape(host.serialnum, serialnum, sizeof(serialnum));
        fprintf(f,
            " <coprocs>%s</coprocs>\n", serialnum
        );
    }
    if (detail) {
        fprintf(f,
            " <create_time>%d</create_time>\n"
            " <rpc_time>%d</rpc_time>\n"
            " <timezone>%d</timezone>\n"
            " <ncpus>%d</ncpus>\n"
            " <p_fpops>%f</p_fpops>\n"
            " <p_iops>%f</p_iops>\n"
            " <p_membw>%f</p_membw>\n"
            " <m_nbytes>%f</m_nbytes>\n"
            " <m_cache>%f</m_cache>\n"
            " <m_swap>%f</m_swap>\n"
            " <d_total>%f</d_total>\n"
            " <d_free>%f</d_free>\n"
            " <n_bwup>%f</n_bwup>\n"
            " <n_bwdown>%f</n_bwdown>\n"
            " <avg_turnaround>%f</avg_turnaround>\n"
            " <credit_per_cpu_sec>%f</credit_per_cpu_sec>\n"
            " <host_cpid>%s</host_cpid>\n",
            host.create_time,
            host.rpc_time,
            host.timezone,
            host.p_ncpus,
            host.p_fpops,
            host.p_iops,
            host.p_membw,
            host.m_nbytes,
            host.m_cache,
            host.m_swap,
            host.d_total,
            host.d_free,
            host.n_bwup,
            host.n_bwdown,
            host.avg_turnaround,
            host.credit_per_cpu_sec,
            host.host_cpid
        );
    }
    fprintf(f,
        "</host>\n"
    );
}

void write_user(USER& user, FILE* f, bool /*detail*/) {
    char buf[1024];
    char cpid[MD5_LEN];

    char name[2048], url[2048];
    xml_escape(user.name, name, sizeof(name));
    xml_escape(user.url, url, sizeof(url));

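    // the exported <cpid> is the MD5 of the user's internal
    // cross-project ID concatenated with their email address,
    // so stats sites can link accounts across projects
    // without the email address itself being published
    //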
    safe_strcpy(buf, user.cross_project_id);
    safe_strcat(buf, user.email_addr);
    md5_block((unsigned char*)buf, strlen(buf), cpid);

    fprintf(f,
        "<user>\n"
        " <id>%d</id>\n"
        " <name>%s</name>\n"
        " <country>%s</country>\n"
        " <create_time>%d</create_time>\n"
        " <total_credit>%f</total_credit>\n"
        " <expavg_credit>%f</expavg_credit>\n"
        " <expavg_time>%f</expavg_time>\n"
        " <cpid>%s</cpid>\n",
        user.id,
        name,
        user.country,
        user.create_time,
        user.total_credit,
        user.expavg_credit,
        user.expavg_time,
        cpid
    );
    if (strlen(user.url)) {
        fprintf(f,
            " <url>%s</url>\n",
            url
        );
    }
    if (user.teamid) {
        fprintf(f,
            " <teamid>%d</teamid>\n",
            user.teamid
        );
    }
    if (user.has_profile) {
        fprintf(f,
            " <has_profile/>\n"
        );
    }
#if 0
    if (detail && user.show_hosts) {
        DB_HOST host;
        sprintf(buf, "where userid=%d", user.id);
        while (1) {
            retval = host.enumerate(buf);
            if (retval) break;
            if (host.total_credit > 0) {
                write_host(host, f, false);
            }
        }
        if (retval != ERR_DB_NOT_FOUND) {
            boinc_db.print_error("host enum: %s", boincerror(retval));
            exit(retval);
        }
    }
#endif
    fprintf(f,
        "</user>\n"
    );
}

void write_team(TEAM& team, FILE* f, bool detail) {
    DB_USER user;
    char buf[256];
    char name[2048];
    char url[2048], name_html[2048];
    int retval;
    char description[BLOB_SIZE];

    xml_escape(team.name, name, sizeof(name));

    fprintf(f,
        "<team>\n"
        " <id>%d</id>\n"
        " <type>%d</type>\n"
        " <name>%s</name>\n"
        " <userid>%d</userid>\n"
        " <total_credit>%f</total_credit>\n"
        " <expavg_credit>%f</expavg_credit>\n"
        " <expavg_time>%f</expavg_time>\n",
        team.id,
        team.type,
        name,
        team.userid,
        team.total_credit,
        team.expavg_credit,
        team.expavg_time
    );

    // show founder name since that user might not be active
    //
    retval = user.lookup_id(team.userid);
    if (!retval) {
        char fname[2048];
        xml_escape(user.name, fname, sizeof(fname));
        fprintf(f,
            " <founder_name>%s</founder_name>\n",
            fname
        );
    }

    fprintf(f,
        " <create_time>%d</create_time>\n",
        team.create_time
    );
    if (strlen(team.url)) {
        xml_escape(team.url, url, sizeof(url));
        fprintf(f,
            " <url>%s</url>\n",
            url
        );
    }
    if (strlen(team.name_html)) {
        xml_escape(team.name_html, name_html, sizeof(name_html));
        fprintf(f,
            "<name_html>%s</name_html>\n",
            name_html
        );
    }

    if (strlen(team.description)) {
        xml_escape(team.description, description, sizeof(description));
        fprintf(f,
            "<description>%s</description>\n",
            description
        );
    }

    fprintf(f,
        " <country>%s</country>\n",
        team.country
    );
    if (detail) {
        sprintf(buf, "where teamid=%d", team.id);
        while (1) {
            retval = user.enumerate(buf);
            if (retval) break;
            write_user(user, f, false);
        }
        if (retval != ERR_DB_NOT_FOUND) {
            log_messages.printf(MSG_CRITICAL,
                "user enum: %s", boincerror(retval)
            );
            exit(retval);
        }
    }
    fprintf(f,
        "</team>\n"
    );
}

int print_app(FILE* f, APP& app) {
    fprintf(f, " <application>\n");
    fprintf(f, " <name>%s</name>\n", app.user_friendly_name);

#if 0
    DB_RESULT result;
    char buf[256];
    int n, retval;
    // can't do this stuff because MySQL/InnoDB can't do counts efficiently
    //
    sprintf(buf, "where appid=%d and server_state=%d", app.id, RESULT_SERVER_STATE_UNSENT);
    retval = result.count(n, buf);
    if (!retval) {
        fprintf(f, " <results_unsent>%d</results_unsent>\n", n);
    }

    sprintf(buf, "where appid=%d and server_state=%d", app.id, RESULT_SERVER_STATE_IN_PROGRESS);
    retval = result.count(n, buf);
    if (!retval) {
        fprintf(f, " <results_in_progress>%d</results_in_progress>\n", n);
    }

    sprintf(buf, "where appid=%d and server_state=%d", app.id, RESULT_SERVER_STATE_OVER);
    retval = result.count(n, buf);
    if (!retval) {
        fprintf(f, " <results_over>%d</results_over>\n", n);
    }
#endif

    fprintf(f, " </application>\n");
    return 0;
}

int print_apps(FILE* f) {
    DB_APP app;
    fprintf(f, " <applications>\n");
    while (!app.enumerate()) {
        print_app(f, app);
    }
    fprintf(f, " </applications>\n");
    return 0;
}

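// write tables.xml: the update time, overall user/team/host counts,
// total credit, and the list of applications
//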
int tables_file(char* dir) {
    char buf[256];

    ZFILE f("tables", COMPRESSION_NONE);
    sprintf(buf, "%s/tables.xml", dir);
    f.open(buf);
    fprintf(f.f,
        " <update_time>%d</update_time>\n",
        (int)time(0)
    );
    if (nusers) fprintf(f.f, " <nusers_total>%d</nusers_total>\n", nusers);
    if (nteams) fprintf(f.f, " <nteams_total>%d</nteams_total>\n", nteams);
    if (nhosts) fprintf(f.f, " <nhosts_total>%d</nhosts_total>\n", nhosts);
    if (total_credit) fprintf(f.f, " <total_credit>%lf</total_credit>\n", total_credit);
    print_apps(f.f);
    f.close();
    return 0;
}

int ENUMERATION::make_it_happen(char* output_dir) {
    unsigned int i;
    int n, retval;
    DB_USER user;
    DB_TEAM team;
    DB_HOST host;
    char clause[256];
    char path[MAXPATHLEN];

    sprintf(path, "%s/%s", output_dir, filename);

    for (i=0; i<outputs.size(); i++) {
        OUTPUT& out = outputs[i];
        if (out.recs_per_file) {
            out.nzfile = new NUMBERED_ZFILE(
                tag_name[table], out.compression, path, out.recs_per_file
            );
        } else {
            out.zfile = new ZFILE(tag_name[table], out.compression);
            out.zfile->open(path);
        }
    }
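
    // build the enumeration clause;
    // in every case only records with positive total credit are exported
    //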
    switch(sort) {
    case SORT_NONE:
        strcpy(clause, "where total_credit > 0");
        break;
    case SORT_ID:
        strcpy(clause, "where total_credit > 0 order by id");
        break;
    case SORT_TOTAL_CREDIT:
        strcpy(clause, "where total_credit > 0 order by total_credit desc");
        break;
    case SORT_EXPAVG_CREDIT:
        strcpy(clause, "where total_credit > 0 order by expavg_credit desc");
        break;
    }
    switch(table) {
    case TABLE_USER:
        n = 0;
        while (1) {
            retval = user.enumerate(clause, true);
            if (retval) break;
            nusers++;
            total_credit += user.total_credit;
            for (i=0; i<outputs.size(); i++) {
                OUTPUT& out = outputs[i];
                if (sort == SORT_ID && out.recs_per_file) {
                    out.nzfile->set_id(n++);
                }
                if (out.zfile) {
                    write_user(user, out.zfile->f, out.detail);
                } else {
                    write_user(user, out.nzfile->f, out.detail);
                }
            }
        }
        if (retval != ERR_DB_NOT_FOUND) {
            log_messages.printf(MSG_CRITICAL,
                "user enum: %s", boincerror(retval)
            );
            exit(retval);
        }
        break;
    case TABLE_HOST:
        n = 0;
        while (1) {
            retval = host.enumerate(clause);
            if (retval) break;
            if (!host.userid) continue;
            nhosts++;
            for (i=0; i<outputs.size(); i++) {
                OUTPUT& out = outputs[i];
                if (sort == SORT_ID && out.recs_per_file) {
                    out.nzfile->set_id(n++);
                }
                if (out.zfile) {
                    write_host(host, out.zfile->f, out.detail);
                } else {
                    write_host(host, out.nzfile->f, out.detail);
                }
            }
        }
        if (retval != ERR_DB_NOT_FOUND) {
            log_messages.printf(MSG_CRITICAL,
                "host enum: %s", boincerror(retval)
            );
            exit(retval);
        }
        break;
    case TABLE_TEAM:
        n = 0;
        while (1) {
            retval = team.enumerate(clause);
            if (retval) break;
            nteams++;
            for (i=0; i<outputs.size(); i++) {
                OUTPUT& out = outputs[i];
                if (sort == SORT_ID && out.recs_per_file) {
                    out.nzfile->set_id(n++);
                }
                if (out.zfile) {
                    write_team(team, out.zfile->f, out.detail);
                } else {
                    write_team(team, out.nzfile->f, out.detail);
                }
            }
        }
        if (retval != ERR_DB_NOT_FOUND) {
            log_messages.printf(MSG_CRITICAL,
                "team enum: %s", boincerror(retval)
            );
            exit(retval);
        }
        break;
    }
    for (i=0; i<outputs.size(); i++) {
        OUTPUT& out = outputs[i];
        if (out.zfile) out.zfile->close();
        if (out.nzfile) out.nzfile->close();
    }
    return 0;
}

void usage(char* name) {
    fprintf(stderr,
        "This program generates XML files containing project statistics.\n"
        "It should be run once a day as a periodic task in config.xml.\n"
        "For more info, see http://boinc.berkeley.edu/trac/wiki/DbDump\n\n"
        "Usage: %s [options]\n"
        "Options:\n"
        "  --dump_spec filename       Use the given spec file (usually ../db_dump_spec.xml)\n"
        "  [-d N | --debug_level N]   Set verbosity level (1 to 4)\n"
        "  [--db_host H]              Use the DB server on host H\n"
        "  [-h | --help]              Show this\n"
        "  [-v | --version]           Show version information\n",
        name
    );
}

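// An illustrative periodic-task entry for config.xml
// (assumed form; adjust the paths and period for your project):
//
//   <task>
//       <cmd>db_dump -d 2 --dump_spec ../db_dump_spec.xml</cmd>
//       <period>24 hours</period>
//       <output>db_dump.out</output>
//   </task>
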
int main(int argc, char** argv) {
    int retval, i;
    DUMP_SPEC spec;
    char* db_host = 0;
    char spec_filename[256], buf[256];
    FILE_LOCK file_lock;

    check_stop_daemons();
    setbuf(stderr, 0);

    log_messages.printf(MSG_NORMAL, "db_dump starting\n");
    strcpy(spec_filename, "");
    for (i=1; i<argc; i++) {
        if (is_arg(argv[i], "dump_spec")) {
            if (!argv[++i]) {
                log_messages.printf(MSG_CRITICAL, "%s requires an argument\n\n", argv[--i]);
                usage(argv[0]);
                exit(1);
            }
            safe_strcpy(spec_filename, argv[i]);
        } else if (is_arg(argv[i], "d") || is_arg(argv[i], "debug_level")) {
            if (!argv[++i]) {
                log_messages.printf(MSG_CRITICAL, "%s requires an argument\n\n", argv[--i]);
                usage(argv[0]);
                exit(1);
            }
            int dl = atoi(argv[i]);
            log_messages.set_debug_level(dl);
            if (dl == 4) g_print_queries = true;
        } else if (is_arg(argv[i], "db_host")) {
            if (!argv[++i]) {
                log_messages.printf(MSG_CRITICAL, "%s requires an argument\n\n", argv[--i]);
                usage(argv[0]);
                exit(1);
            }
            db_host = argv[i];
        } else if (is_arg(argv[i], "h") || is_arg(argv[i], "help")) {
            usage(argv[0]);
            exit(0);
        } else if (is_arg(argv[i], "v") || is_arg(argv[i], "version")) {
            printf("%s\n", SVN_VERSION);
            exit(0);
        } else {
            log_messages.printf(MSG_CRITICAL,
                "unknown command line argument: %s\n\n", argv[i]
            );
            usage(argv[0]);
            exit(1);
        }
    }

    if (!strlen(spec_filename)) {
        log_messages.printf(MSG_CRITICAL, "no spec file given\n");
        usage(argv[0]);
        exit(1);
    }

    FILE* f = fopen(spec_filename, "r");
    if (!f) {
        log_messages.printf(MSG_CRITICAL, "spec file missing\n");
        exit(1);
    }

    retval = spec.parse(f);
    if (retval) {
        log_messages.printf(MSG_CRITICAL, "can't parse spec file\n");
        exit(1);
    }

    fclose(f);

    if (file_lock.lock(LOCKFILE)) {
        log_messages.printf(MSG_CRITICAL, "Another copy of db_dump is already running\n");
        exit(1);
    }
    log_messages.printf(MSG_NORMAL, "Starting\n");

    retval = config.parse_file();
    if (retval) {
        log_messages.printf(MSG_CRITICAL,
            "Can't parse config.xml: %s\n", boincerror(retval)
        );
        exit(1);
    }
    retval = boinc_db.open(
        config.replica_db_name,
        db_host?db_host:config.replica_db_host,
        config.replica_db_user,
        config.replica_db_passwd
    );
    if (retval) {
        log_messages.printf(MSG_CRITICAL, "Can't open DB\n");
        exit(1);
    }
    retval = boinc_db.set_isolation_level(READ_UNCOMMITTED);
    if (retval) {
        log_messages.printf(MSG_CRITICAL,
            "boinc_db.set_isolation_level: %s; %s\n",
            boincerror(retval), boinc_db.error_string()
        );
    }

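    // generate the new dump under output_dir;
    // it's swapped into final_output_dir (the web-visible location)
    // only after everything has been written
    //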
    boinc_mkdir(spec.output_dir);

    unsigned int j;
    for (j=0; j<spec.enumerations.size(); j++) {
        ENUMERATION& e = spec.enumerations[j];
        e.make_it_happen(spec.output_dir);
    }

    tables_file(spec.output_dir);

    sprintf(buf, "cp %s %s/db_dump.xml", spec_filename, spec.output_dir);
    retval = system(buf);
    if (retval) {
        log_messages.printf(MSG_CRITICAL,
            "%s failed: %s\n", buf, boincerror(retval)
        );
        exit(retval);
    }

    // rename the old stats dir to a name that includes the date
    //
    if (boinc_file_exists(spec.final_output_dir)) {
        struct tm* tmp;
        time_t now = time(0);
        tmp = localtime(&now);
        char base[256];
        if (strlen(spec.archive_dir)) {
            strcpy(base, spec.archive_dir);
            strcat(base, "/stats");
        } else {
            strcpy(base, spec.final_output_dir);
        }
        sprintf(buf, "mv %s %s_%d_%d_%d_%d_%d_%d",
            spec.final_output_dir,
            base,
            1900+tmp->tm_year,
            tmp->tm_mon+1,
            tmp->tm_mday,
            tmp->tm_hour,
            tmp->tm_min,
            tmp->tm_sec
        );
        retval = system(buf);
        if (retval) {
            log_messages.printf(MSG_CRITICAL, "Can't rename old stats\n");
            exit(1);
        }
    }
    sprintf(buf, "mv %s %s", spec.output_dir, spec.final_output_dir);
    retval = system(buf);
    if (retval) {
        log_messages.printf(MSG_CRITICAL, "Can't rename new stats\n");
        exit(1);
    }
    log_messages.printf(MSG_NORMAL, "db_dump finished\n");
    return 0;
}

const char *BOINC_RCSID_500089bde6 = "$Id$";