// boinc/client/gui_rpc_server.C
// The contents of this file are subject to the BOINC Public License
// Version 1.0 (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at
// http://boinc.berkeley.edu/license_1.0.txt
//
// Software distributed under the License is distributed on an "AS IS"
// basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
// License for the specific language governing rights and limitations
// under the License.
//
// The Original Code is the Berkeley Open Infrastructure for Network Computing.
//
// The Initial Developer of the Original Code is the SETI@home project.
// Portions created by the SETI@home project are Copyright (C) 2002
// University of California at Berkeley. All Rights Reserved.
//
// Contributor(s):
//
#ifdef _WIN32
#include "boinc_win.h"
#endif
#ifndef _WIN32
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <vector>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#endif
#include "util.h"
#include "error_numbers.h"
#include "parse.h"
#include "file_names.h"
#include "client_msgs.h"
#include "client_state.h"
using std::string;
using std::vector;
#if defined(_WIN32)
typedef int socklen_t;
#elif defined ( __APPLE__)
typedef int32_t socklen_t;
#elif !GETSOCKOPT_SOCKLEN_T
#ifndef socklen_t
typedef size_t socklen_t;
#endif
#endif
// Close a socket descriptor with the platform-appropriate call:
// closesocket() on Windows, close() elsewhere.
static void boinc_close_socket(int sock) {
#ifdef _WIN32
closesocket(sock);
#else
close(sock);
#endif
}
// A GUI RPC connection object takes ownership of an accepted socket;
// the socket is closed when the object is destroyed.
GUI_RPC_CONN::GUI_RPC_CONN(int s) : sock(s) {
}
// Closing the socket ends the RPC session with the GUI client.
GUI_RPC_CONN::~GUI_RPC_CONN() {
boinc_close_socket(sock);
}
// Handle <get_project_status>: write the state of every attached
// project as a <projects>...</projects> XML document.
static void handle_get_project_status(MIOFILE& fout) {
    fout.printf("<projects>\n");
    for (unsigned int idx = 0; idx < gstate.projects.size(); idx++) {
        gstate.projects[idx]->write_state(fout, true);
    }
    fout.printf("</projects>\n");
}
// Handle <get_disk_usage>: report each project's master URL and its
// on-disk footprint as XML.
static void handle_get_disk_usage(MIOFILE& fout) {
    fout.printf("<projects>\n");
    for (unsigned int idx = 0; idx < gstate.projects.size(); idx++) {
        PROJECT* proj = gstate.projects[idx];
        double usage;
        gstate.project_disk_usage(proj, usage);
        fout.printf(
            "<project>\n"
            " <master_url>%s</master_url>\n"
            " <disk_usage>%f</disk_usage>\n"
            "</project>\n",
            proj->master_url, usage
        );
    }
    fout.printf("</projects>\n");
}
// Parse the <project_url> element from the request and look the
// project up in client state.  On failure, writes an <error> reply
// to fout and returns NULL.
static PROJECT* get_project(char* buf, MIOFILE& fout) {
    string url;
    if (!parse_str(buf, "<project_url>", url)) {
        fout.printf("<error>Missing project URL</error>\n");
        return NULL;
    }
    PROJECT* proj = gstate.lookup_project(url.c_str());
    if (proj) return proj;
    fout.printf("<error>No such project</error>\n");
    return NULL;
}
// Handle <result_show_graphics>: ask one result's active task (if
// <result_name> is given) or all scheduled tasks to show graphics,
// full-screen if <full_screen/> is present.
static void handle_result_show_graphics(char* buf, MIOFILE& fout) {
    string result_name;
    ACTIVE_TASK* atp;
    int mode;

    // default to windowed mode unless full-screen was requested
    if (match_tag(buf, "<full_screen/>")) {
        mode = MODE_FULLSCREEN;
    } else {
        mode = MODE_WINDOW;
    }
    if (parse_str(buf, "<result_name>", result_name)) {
        PROJECT* p = get_project(buf, fout);
        if (!p) {
            // get_project() already wrote an <error> reply;
            // don't emit a duplicate one
            return;
        }
        RESULT* rp = gstate.lookup_result(p, result_name.c_str());
        if (!rp) {
            fout.printf("<error>No such result</error>\n");
            return;
        }
        atp = gstate.lookup_active_task_by_result(rp);
        if (!atp || atp->scheduler_state != CPU_SCHED_SCHEDULED) {
            fout.printf("<error>Result not active</error>\n");
            return;
        }
        atp->request_graphics_mode(mode);
    } else {
        // no result named: apply to every currently scheduled task
        for (unsigned int i=0; i<gstate.active_tasks.active_tasks.size(); i++) {
            atp = gstate.active_tasks.active_tasks[i];
            if (atp->scheduler_state != CPU_SCHED_SCHEDULED) continue;
            atp->request_graphics_mode(mode);
        }
    }
    fout.printf("<success/>\n");
}
static void handle_project_op(char* buf, MIOFILE& fout, char* op) {
PROJECT* p = get_project(buf, fout);
if (!p) {
fout.printf("<error>no such project</error>\n");
return;
}
if (!strcmp(op, "reset")) {
gstate.reset_project(p);
} else if (!strcmp(op, "suspend")) {
p->suspended_via_gui = true;
} else if (!strcmp(op, "resume")) {
p->suspended_via_gui = false;
} else if (!strcmp(op, "detach")) {
gstate.detach_project(p);
} else if (!strcmp(op, "update")) {
p->sched_rpc_pending = true;
p->min_rpc_time = 0;
}
gstate.must_schedule_cpus = true;
fout.printf("<success/>\n");
}
static void handle_project_attach(char* buf, MIOFILE& fout) {
string url, authenticator;
if (!parse_str(buf, "<project_url>", url)) {
fout.printf("<error>Missing URL</error>\n");
return;
}
if (!parse_str(buf, "<authenticator>", authenticator)) {
fout.printf("<error>Missing authenticator</error>\n");
return;
}
gstate.add_project(url.c_str(), authenticator.c_str());
fout.printf("<success/>\n");
}
static void handle_set_run_mode(char* buf, MIOFILE& fout) {
if (match_tag(buf, "<always")) {
gstate.user_run_request = USER_RUN_REQUEST_ALWAYS;
} else if (match_tag(buf, "<never")) {
gstate.user_run_request = USER_RUN_REQUEST_NEVER;
} else if (match_tag(buf, "<auto")) {
gstate.user_run_request = USER_RUN_REQUEST_AUTO;
} else {
fout.printf("<error>Missing mode</error>\n");
return;
}
fout.printf("<success/>\n");
}
// Handle <get_run_mode>: report the current CPU run preference.
static void handle_get_run_mode(char* , MIOFILE& fout) {
    const char* body;
    if (gstate.user_run_request == USER_RUN_REQUEST_ALWAYS) {
        body = "<always/>\n";
    } else if (gstate.user_run_request == USER_RUN_REQUEST_NEVER) {
        body = "<never/>\n";
    } else if (gstate.user_run_request == USER_RUN_REQUEST_AUTO) {
        body = "<auto/>\n";
    } else {
        body = "<error>Unknown run mode</error>\n";
    }
    fout.printf("<run_mode>\n");
    fout.printf("%s", body);
    fout.printf("</run_mode>\n");
}
static void handle_set_network_mode(char* buf, MIOFILE& fout) {
if (match_tag(buf, "<always")) {
gstate.user_network_request = USER_RUN_REQUEST_ALWAYS;
} else if (match_tag(buf, "<never")) {
gstate.user_network_request = USER_RUN_REQUEST_NEVER;
} else {
fout.printf("<error>Missing mode</error>\n");
return;
}
fout.printf("<success/>\n");
}
// Handle <get_network_mode>: report the current network preference.
static void handle_get_network_mode(char* , MIOFILE& fout) {
    const char* body;
    if (gstate.user_network_request == USER_RUN_REQUEST_ALWAYS) {
        body = "<always/>\n";
    } else if (gstate.user_network_request == USER_RUN_REQUEST_NEVER) {
        body = "<never/>\n";
    } else {
        body = "<error>Unknown network mode</error>\n";
    }
    fout.printf("<network_mode>\n");
    fout.printf("%s", body);
    fout.printf("</network_mode>\n");
}
// Handle <run_benchmarks/>: kick off CPU benchmarks and acknowledge.
static void handle_run_benchmarks(char* , MIOFILE& fout) {
gstate.start_cpu_benchmarks();
fout.printf("<success/>\n");
}
static void handle_set_proxy_settings(char* buf, MIOFILE& fout) {
MIOFILE in;
in.init_buf(buf);
gstate.proxy_info.parse(in);
gstate.set_client_state_dirty("Set proxy settings RPC");
fout.printf("<success/>\n");
}
// Handle <get_proxy_settings/>: write the current proxy config as XML.
static void handle_get_proxy_settings(char* , MIOFILE& fout) {
gstate.proxy_info.write(fout);
}
// Handle <get_messages>.
// params:
// [ <seqno>n</seqno> ]
// return only msgs with seqno > n; if absent or zero, return all
//
void handle_get_messages(char* buf, MIOFILE& fout) {
    int seqno = 0;
    parse_int(buf, "<seqno>", seqno);

    // message_descs appears to be ordered newest-first (index 0 is the
    // most recent); find the index of the oldest message the caller
    // hasn't seen yet.  (Removed a "found" flag that was set but never
    // read in the original.)
    int j = (int)message_descs.size() - 1;
    for (unsigned int k = 0; k < message_descs.size(); k++) {
        if (message_descs[k]->seqno <= seqno) {
            j = (int)k - 1;
            break;
        }
    }

    // emit the unseen messages oldest-first
    fout.printf("<msgs>\n");
    for (int i = j; i >= 0; i--) {
        MESSAGE_DESC* mdp = message_descs[i];
        fout.printf(
            "<msg>\n"
            " <pri>%d</pri>\n"
            " <seqno>%d</seqno>\n"
            " <body>\n%s\n</body>\n"
            " <time>%d</time>\n",
            mdp->priority,
            mdp->seqno,
            mdp->message.c_str(),
            mdp->timestamp
        );
        if (mdp->project) {
            fout.printf(
                " <project>%s</project>\n",
                mdp->project->get_project_name()
            );
        }
        fout.printf("</msg>\n");
    }
    fout.printf("</msgs>\n");
}
// Handle <retry_file_transfer> / <abort_file_transfer>:
// <project_url>XXX</project_url>
// <filename>XXX</filename>
// "op" is "retry" or "abort".
static void handle_file_transfer_op(char* buf, MIOFILE& fout, char* op) {
    string filename;

    PROJECT* p = get_project(buf, fout);
    if (!p) {
        // get_project() already wrote an <error> reply;
        // don't emit a duplicate one
        return;
    }
    if (!parse_str(buf, "<filename>", filename)) {
        fout.printf("<error>Missing filename</error>\n");
        return;
    }
    FILE_INFO* f = gstate.lookup_file_info(p, filename.c_str());
    if (!f) {
        fout.printf("<error>No such file</error>\n");
        return;
    }
    // only files with a persistent transfer in progress can be acted on
    if (!f->pers_file_xfer) {
        fout.printf("<error>No such transfer waiting</error>\n");
        return;
    }
    if (!strcmp(op, "retry")) {
        // make the transfer eligible for immediate retry
        f->pers_file_xfer->next_request_time = 0;
    } else if (!strcmp(op, "abort")) {
        f->pers_file_xfer->abort();
    } else {
        fout.printf("<error>unknown op</error>\n");
        return;
    }
    fout.printf("<success/>\n");
}
static void handle_result_op(char* buf, MIOFILE& fout, char* op) {
RESULT* rp;
char result_name[256];
ACTIVE_TASK* atp;
PROJECT* p = get_project(buf, fout);
if (!p) {
fout.printf("<error>No such project</error>\n");
return;
}
if (!parse_str(buf, "<name>", result_name, sizeof(result_name))) {
fout.printf("<error>Missing result name</error>\n");
return;
}
rp = gstate.lookup_result(p, result_name);
if (!rp) {
fout.printf("<error>no such result</error>\n");
return;
}
atp = gstate.lookup_active_task_by_result(rp);
if (!atp) {
fout.printf("<error>result is not active</error>\n");
return;
}
if (!strcmp(op, "abort")) {
atp->abort_task("aborted via GUI RPC");
} else if (!strcmp(op, "suspend")) {
atp->suspended_via_gui = true;
} else if (!strcmp(op, "resume")) {
atp->suspended_via_gui = false;
}
gstate.must_schedule_cpus = true;
fout.printf("<success/>\n");
}
// Handle <get_host_info/>: write the host hardware/OS description as XML.
static void handle_get_host_info(char*, MIOFILE& fout) {
gstate.host_info.write(fout);
}
// Read one GUI RPC request from this connection's socket, dispatch it
// to the matching handler, and send back the reply terminated by \003.
// Returns nonzero if the read failed or the peer closed the connection
// (the caller then destroys this connection).
int GUI_RPC_CONN::handle_rpc() {
    char request_msg[1024];
    int n;
    MIOFILE mf;
    MFILE m;
    char* p;
    mf.init_mfile(&m);
    SCOPE_MSG_LOG scope_messages(log_messages, CLIENT_MSG_LOG::DEBUG_GUIRPC);

    // read the request message in one read()
    // so that the core client won't hang because
    // of malformed request msgs.
    //
    // Read at most sizeof-1 bytes so the NUL terminator below stays
    // inside the buffer; the original read of the full 1024 bytes
    // could write request_msg[1024], one byte past the end.
    //
#ifdef _WIN32
    n = recv(sock, request_msg, sizeof(request_msg)-1, 0);
#else
    n = read(sock, request_msg, sizeof(request_msg)-1);
#endif
    if (n <= 0) return -1;
    request_msg[n] = 0;
    scope_messages.printf("GUI RPC Command = '%s'\n", request_msg);

    // dispatch on the request's outermost tag
    if (match_tag(request_msg, "<get_state")) {
        gstate.write_state_gui(mf);
    } else if (match_tag(request_msg, "<get_results")) {
        gstate.write_tasks_gui(mf);
    } else if (match_tag(request_msg, "<get_file_transfers")) {
        gstate.write_file_transfers_gui(mf);
    } else if (match_tag(request_msg, "<get_project_status")) {
        handle_get_project_status(mf);
    } else if (match_tag(request_msg, "<get_disk_usage")) {
        handle_get_disk_usage(mf);
    } else if (match_tag(request_msg, "<result_show_graphics")) {
        handle_result_show_graphics(request_msg, mf);
    } else if (match_tag(request_msg, "<project_reset")) {
        handle_project_op(request_msg, mf, "reset");
    } else if (match_tag(request_msg, "<project_attach")) {
        handle_project_attach(request_msg, mf);
    } else if (match_tag(request_msg, "<project_detach")) {
        handle_project_op(request_msg, mf, "detach");
    } else if (match_tag(request_msg, "<project_update")) {
        handle_project_op(request_msg, mf, "update");
    } else if (match_tag(request_msg, "<project_suspend")) {
        handle_project_op(request_msg, mf, "suspend");
    } else if (match_tag(request_msg, "<project_resume")) {
        handle_project_op(request_msg, mf, "resume");
    } else if (match_tag(request_msg, "<set_run_mode")) {
        handle_set_run_mode(request_msg, mf);
    } else if (match_tag(request_msg, "<get_run_mode")) {
        handle_get_run_mode(request_msg, mf);
    } else if (match_tag(request_msg, "<set_network_mode")) {
        handle_set_network_mode(request_msg, mf);
    } else if (match_tag(request_msg, "<get_network_mode")) {
        handle_get_network_mode(request_msg, mf);
    } else if (match_tag(request_msg, "<run_benchmarks")) {
        handle_run_benchmarks(request_msg, mf);
    } else if (match_tag(request_msg, "<set_proxy_settings")) {
        handle_set_proxy_settings(request_msg, mf);
    } else if (match_tag(request_msg, "<get_proxy_settings")) {
        handle_get_proxy_settings(request_msg, mf);
    } else if (match_tag(request_msg, "<get_messages")) {
        handle_get_messages(request_msg, mf);
    } else if (match_tag(request_msg, "<retry_file_transfer")) {
        handle_file_transfer_op(request_msg, mf, "retry");
    } else if (match_tag(request_msg, "<abort_file_transfer")) {
        handle_file_transfer_op(request_msg, mf, "abort");
    } else if (match_tag(request_msg, "<abort_result")) {
        handle_result_op(request_msg, mf, "abort");
    } else if (match_tag(request_msg, "<suspend_result")) {
        handle_result_op(request_msg, mf, "suspend");
    } else if (match_tag(request_msg, "<resume_result")) {
        handle_result_op(request_msg, mf, "resume");
    } else if (match_tag(request_msg, "<get_host_info")) {
        handle_get_host_info(request_msg, mf);
    } else {
        mf.printf("<error>unrecognized op</error>\n");
    }

    // \003 marks end-of-reply for the GUI
    mf.printf("\003");
    m.get_buf(p, n);
    send(sock, p, n, 0);
    if (p) free(p);
    return 0;
}
int GUI_RPC_CONN_SET::get_allowed_hosts() {
SCOPE_MSG_LOG scope_messages(log_messages, CLIENT_MSG_LOG::DEBUG_STATE);
// add localhost
allowed_remote_ip_addresses.push_back(0x7f000001);
NET_XFER temp; // network address resolver is in this class
int ipaddr;
char buf[256];
// open file remote_hosts.cfg and read in the
// allowed host list and resolve them to an ip address
//
FILE* f = fopen(REMOTEHOST_FILE_NAME, "r");
if (f != NULL) {
scope_messages.printf(
"GUI_RPC_CONN_SET::get_allowed_hosts(): found allowed hosts list\n"
);
// read in each line, if it is not a comment
// then resolve the address and add to our
// allowed list
memset(buf,0,sizeof(buf));
while (fgets(buf, 256, f) != NULL) {
strip_whitespace(buf);
if (!(buf[0] =='#' || buf[0] == ';') && strlen(buf) > 0 ) {
// resolve and add
if (temp.get_ip_addr(buf, ipaddr) == 0) {
allowed_remote_ip_addresses.push_back((int)ntohl(ipaddr));
}
}
}
fclose(f);
}
return 0;
}
// Add a connection to the set of active GUI RPC connections.
// Ownership of p passes to this set (deleted in poll() on error/close).
int GUI_RPC_CONN_SET::insert(GUI_RPC_CONN* p) {
gui_rpcs.push_back(p);
return 0;
}
int GUI_RPC_CONN_SET::init() {
sockaddr_in addr;
int retval;
// get list of hosts allowed to do GUI RPCs
//
get_allowed_hosts();
lsock = socket(AF_INET, SOCK_STREAM, 0);
if (lsock < 0) {
msg_printf(NULL, MSG_ERROR,
"GUI RPC failed to create socket: %d\n", lsock
);
return ERR_SOCKET;
}
addr.sin_family = AF_INET;
addr.sin_port = htons(GUI_RPC_PORT);
addr.sin_addr.s_addr = htonl(INADDR_ANY);
int one = 1;
setsockopt(lsock, SOL_SOCKET, SO_REUSEADDR, (char*)&one, 4);
retval = bind(lsock, (const sockaddr*)(&addr), sizeof(addr));
if (retval) {
msg_printf(NULL, MSG_ERROR, "GUI RPC bind failed: %d\n", retval);
boinc_close_socket(lsock);
lsock = -1;
return ERR_BIND;
}
retval = listen(lsock, 999);
if (retval) {
msg_printf(NULL, MSG_ERROR, "GUI RPC listen failed: %d\n", retval);
boinc_close_socket(lsock);
lsock = -1;
return ERR_LISTEN;
}
return 0;
}
bool GUI_RPC_CONN_SET::poll(double) {
int n = 0;
unsigned int i;
fd_set read_fds, error_fds;
int sock, retval;
vector<GUI_RPC_CONN*>::iterator iter;
GUI_RPC_CONN* gr;
struct timeval tv;
if (lsock < 0) return false;
FD_ZERO(&read_fds);
FD_ZERO(&error_fds);
FD_SET(lsock, &read_fds);
for (i=0; i<gui_rpcs.size(); i++) {
gr = gui_rpcs[i];
FD_SET(gr->sock, &read_fds);
FD_SET(gr->sock, &error_fds);
}
memset(&tv, 0, sizeof(tv));
n = select(FD_SETSIZE, &read_fds, 0, &error_fds, &tv);
if (FD_ISSET(lsock, &read_fds)) {
struct sockaddr_in addr;
socklen_t addr_len = sizeof(addr);
sock = accept(lsock, (struct sockaddr*)&addr, &addr_len);
int peer_ip = (int) ntohl(addr.sin_addr.s_addr);
// check list of allowed remote hosts
bool allowed = false;
vector<int>::iterator remote_iter;
remote_iter = allowed_remote_ip_addresses.begin();
while (remote_iter != allowed_remote_ip_addresses.end() ) {
int remote_host = *remote_iter;
if (peer_ip == remote_host) allowed = true;
remote_iter++;
}
// accept the connection if:
// 1) allow_remote_gui_rpc is set or
// 2) client host is included in "remote_hosts" file or
// 3) client is on localhost
//
if ( !(gstate.allow_remote_gui_rpc) && !(allowed)) {
in_addr ia;
ia.s_addr = htonl(peer_ip);
msg_printf(
NULL, MSG_ERROR,
"GUI RPC request from non-allowed address %s\n",
inet_ntoa(ia)
);
boinc_close_socket(sock);
} else {
GUI_RPC_CONN* gr = new GUI_RPC_CONN(sock);
insert(gr);
}
}
iter = gui_rpcs.begin();
while (iter != gui_rpcs.end()) {
gr = *iter;
if (FD_ISSET(gr->sock, &error_fds)) {
delete gr;
gui_rpcs.erase(iter);
} else {
iter++;
}
}
iter = gui_rpcs.begin();
while (iter != gui_rpcs.end()) {
gr = *iter;
if (FD_ISSET(gr->sock, &read_fds)) {
retval = gr->handle_rpc();
if (retval) {
delete gr;
gui_rpcs.erase(iter);
continue;
}
}
iter++;
}
return (n > 0);
}