buffer overflow

svn path=/trunk/boinc/; revision=2263
David Anderson 2003-09-04 20:11:47 +00:00
parent bce67613cb
commit c6c3126a9d
10 changed files with 92 additions and 36 deletions


@@ -6121,3 +6121,27 @@ quarl 2003/09/03
     tools_other.php
     single_host_server.php
     create_project.php
+
+David Sept 4 2003
+    - fix bug in scheduler: the buffer in insert_wu_tags() wasn't large enough.
+      This is a powerful argument in favor of using "string" everywhere!
+      NOTE: in spite of fix, test_uc.py isn't working.  Problem w/ proxy?
+    - HTTP_OP_SET::poll(): if header read_reply() returns nonzero,
+      set htp->io_ready to false (to avoid lots and lots of spurious recv()s)
+    - in test scripts: if TEST_STOP_BEFORE_RUN is set,
+      go into sleep loop rather than exiting
+      (to avoid deleting the test setup)
+    - factor out get_socket_error()
+
+    client/
+        client_state.C
+        cs_scheduler.C
+        http.C
+        main.C
+        net_xfer.C,h
+        scheduler_op.C
+    sched/
+        handle_request.C
+        main.C
+    test/
+        testbase.py

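The first changelog item is the overflow in the commit title: insert_wu_tags() built XML with sprintf() into a fixed char buf[256], and the formatted tags had grown past 256 bytes. The sketch below contrasts that hazard with the std::string approach the note argues for; wu_tags() and the single tag shown are illustrative, not the scheduler's actual code.

    #include <cstdio>
    #include <string>

    // Illustrative sketch only. sprintf() has no bound, so a fixed
    // char buf[256] silently overflows once the tags grow; std::string
    // grows as needed, which is the changelog's "powerful argument".
    std::string wu_tags(double fpops_est) {
        char num[64];
        snprintf(num, sizeof(num), "%f", fpops_est);  // bounded numeric format
        std::string tags;
        tags += "    <rsc_fpops_est>";   // no fixed size to guess
        tags += num;
        tags += "</rsc_fpops_est>\n";
        return tags;
    }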

@@ -1585,11 +1585,11 @@ bool CLIENT_STATE::time_to_exit() {
         && app_started
         && (difftime(time(0), app_started) >= exit_after_app_start_secs)
     ) {
-        printf("exiting because time is up: %d\n", exit_after_app_start_secs);
+        msg_printf(NULL, MSG_INFO, "exiting because time is up: %d\n", exit_after_app_start_secs);
         return true;
     }
     if (exit_when_idle && (results.size() == 0) && contacted_sched_server) {
-        printf("exiting because no more results\n");
+        msg_printf(NULL, MSG_INFO, "exiting because no more results\n");
         return true;
     }
     return false;


@@ -383,10 +383,7 @@ int CLIENT_STATE::handle_scheduler_reply(
     contacted_sched_server = true;
     ScopeMessages scope_messages(log_messages, ClientMessages::DEBUG_SCHED_OP);
 
-    scope_messages.printf_file(
-        SCHED_OP_RESULT_FILE,
-        "CLIENT_STATE::handle_scheduler_reply(): reply: "
-    );
+    scope_messages.printf_file(SCHED_OP_RESULT_FILE, "reply: ");
 
     f = fopen(SCHED_OP_RESULT_FILE, "r");
     if (!f) return ERR_FOPEN;


@@ -126,16 +126,14 @@ static void http_post_request_header(
     );
 }
 
-void HTTP_REPLY_HEADER::init()
-{
+void HTTP_REPLY_HEADER::init() {
     status = 500;
     content_length = 0;
     redirect_location.erase();
     recv_buf.erase();
 }
 
-void HTTP_REPLY_HEADER::parse()
-{
+void HTTP_REPLY_HEADER::parse() {
     ScopeMessages scope_messages(log_messages, ClientMessages::DEBUG_HTTP);
     istringstream h(recv_buf);

@@ -172,21 +170,26 @@ const unsigned int MAX_HEADER_SIZE = 1024;
 
 // Parse an http reply header into the header struct
 //
-// Returns 1 if we are not done yet, 0 if done (header.status indicates
-// success)
+// Returns 1 if not done yet, 0 if done (header.status indicates success)
 //
-int HTTP_REPLY_HEADER::read_reply(int socket)
-{
+int HTTP_REPLY_HEADER::read_reply(int socket) {
     ScopeMessages scope_messages(log_messages, ClientMessages::DEBUG_HTTP);
 
     while (recv_buf.size() < MAX_HEADER_SIZE) {
         char c;
+        errno = 0;
         int n = recv(socket, &c, 1, 0);
+        scope_messages.printf(
+            "HTTP_REPLY_HEADER::read_reply(): recv() on socket %d returned %d errno %d sockerr %d\n",
+            socket, n, errno, get_socket_error(socket)
+        );
         if (n == -1 && errno == EAGAIN) {
-            scope_messages.printf("HTTP_REPLY_HEADER::read_reply(): recv() returned %d (EAGAIN)\n", n);
             return 1;
         }
+        // if n is zero, we've reached EOF (and that's an error)
+        //
         if (n != 1) {
-            scope_messages.printf("HTTP_REPLY_HEADER::read_reply(): recv() returned %d\n", n);
             break;
         }

@@ -195,15 +198,19 @@ int HTTP_REPLY_HEADER::read_reply(int socket)
         if (ends_with(recv_buf, "\n\n")) {
             scope_messages.printf_multiline(recv_buf.c_str(),
-                "HTTP_REPLY_HEADER::read_reply(): header: ");
+                "HTTP_REPLY_HEADER::read_reply(): header: "
+            );
             parse();
             return 0;
         }
     }
 
-    // error occurred
-    scope_messages.printf("HTTP_REPLY_HEADER::read_reply(): returning error (recv_buf.size=%d)\n",
-        recv_buf.size());
+    // error occurred; status will be 500 (from constructor)
+    //
+    scope_messages.printf(
+        "HTTP_REPLY_HEADER::read_reply(): returning error (recv_buf=%s)\n",
+        recv_buf.c_str()
+    );
     return 0;
 }

@@ -471,13 +478,17 @@ bool HTTP_OP_SET::poll() {
                 htp->io_ready = false;
                 htp->io_done = false;
             }
-            // TODO: intentional no break here? -- quarl
+            break;
        case HTTP_STATE_REPLY_HEADER:
            if (htp->io_ready) {
                action = true;
-                scope_messages.printf("HTTP_OP_SET::poll(): got reply header; %p io_done %d\n", htp, htp->io_done);
+                scope_messages.printf(
+                    "HTTP_OP_SET::poll(): reading reply header; io_ready %d io_done %d\n",
+                    htp->io_ready, htp->io_done
+                );
                if (htp->hrh.read_reply(htp->socket)) {
                    // not done yet
+                    htp->io_ready = false;
                    break;
                }

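The poll() change above is the changelog's "spurious recv()s" fix: read_reply() returning nonzero means recv() hit EAGAIN, i.e. the socket has no buffered data right now, so leaving io_ready set would make every later poll() call recv() again for nothing (recv() returning 0 rather than -1/EAGAIN is orderly EOF, which the header reader treats as an error). A minimal sketch of the pattern, with invented names (Op, try_read, poll_once):

    #include <cerrno>
    #include <sys/socket.h>

    struct Op { int socket; bool io_ready; };  // io_ready: select() saw data

    // Returns 1 if the read would block, 0 otherwise (sketch only).
    int try_read(Op& op) {
        char c;
        errno = 0;
        int n = recv(op.socket, &c, 1, 0);
        if (n == -1 && errno == EAGAIN) {
            return 1;               // nothing buffered right now
        }
        // ... consume c, or handle n == 0 (EOF) / n == -1 (error) ...
        return 0;
    }

    void poll_once(Op& op) {
        if (!op.io_ready) return;   // wait for select() to flag the socket
        if (try_read(op)) {
            op.io_ready = false;    // the commit's fix: re-arm instead of spinning
        }
    }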

@@ -163,7 +163,12 @@ int main(int argc, char** argv) {
             fflush(stdout);
         }
-        if (gstate.time_to_exit() || gstate.requested_exit) {
+        if (gstate.time_to_exit()) {
+            msg_printf(NULL, MSG_INFO, "Time to exit");
+            break;
+        }
+        if (gstate.requested_exit) {
+            msg_printf(NULL, MSG_INFO, "Exit requested by signal");
             break;
         }
     }


@@ -73,6 +73,19 @@ typedef int socklen_t;
 typedef size_t socklen_t;
 #endif
 
+int get_socket_error(int fd) {
+    socklen_t intsize = sizeof(int);
+    int n;
+#ifdef _WIN32
+    getsockopt(fd, SOL_SOCKET, SO_ERROR, (char *)&n, &intsize);
+#elif __APPLE__
+    getsockopt(fd, SOL_SOCKET, SO_ERROR, &n, (int *)&intsize);
+#else
+    getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&n, &intsize);
+#endif
+    return n;
+}
+
 int NET_XFER::get_ip_addr(char *hostname, int &ip_addr) {
     hostent* hep;

@@ -325,7 +338,6 @@ int NET_XFER_SET::do_select(double& bytes_transferred, timeval& timeout) {
     int n, fd, retval, nsocks_queried;
     socklen_t i;
     NET_XFER *nxp;
-    socklen_t intsize = sizeof(int);
 
     ScopeMessages scope_messages(log_messages, ClientMessages::DEBUG_NET_XFER);

@@ -392,14 +404,14 @@ int NET_XFER_SET::do_select(double& bytes_transferred, timeval& timeout) {
             nxp = net_xfers[i];
             fd = nxp->socket;
             if (FD_ISSET(fd, &read_fds) || FD_ISSET(fd, &write_fds)) {
+                if (FD_ISSET(fd, &read_fds)) {
+                    scope_messages.printf("NET_XFER_SET::do_select(): read enabled on socket %d\n", fd);
+                }
+                if (FD_ISSET(fd, &write_fds)) {
+                    scope_messages.printf("NET_XFER_SET::do_select(): write enabled on socket %d\n", fd);
+                }
                 if (!nxp->is_connected) {
-#ifdef _WIN32
-                    getsockopt(fd, SOL_SOCKET, SO_ERROR, (char *)&n, &intsize);
-#elif __APPLE__
-                    getsockopt(fd, SOL_SOCKET, SO_ERROR, &n, (int *)&intsize);
-#else
-                    getsockopt(fd, SOL_SOCKET, SO_ERROR, (void*)&n, &intsize);
-#endif
+                    n = get_socket_error(fd);
                     if (n) {
                         scope_messages.printf(
                             "NET_XFER_SET::do_select(): socket %d connection to %s failed\n",

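get_socket_error() hides the platform-dependent getsockopt(SO_ERROR) casts (char* buffer on Win32, int* length on Apple, void* elsewhere) behind one call. Its main use is visible above: after a non-blocking connect(), select() marking the socket writable only means the attempt finished; SO_ERROR says whether it succeeded. A hedged sketch of that flow (connect_succeeded is an invented name):

    #include <sys/select.h>
    #include <sys/socket.h>

    extern int get_socket_error(int fd);   // the helper this commit factors out

    bool connect_succeeded(int fd, timeval timeout) {
        fd_set wfds;
        FD_ZERO(&wfds);
        FD_SET(fd, &wfds);
        // Wait for the connect attempt to finish (socket becomes writable).
        if (select(fd + 1, NULL, &wfds, NULL, &timeout) <= 0) {
            return false;                  // timeout or select() error
        }
        return get_socket_error(fd) == 0;  // 0 == connection established
    }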

@@ -94,4 +94,6 @@ public:
     void check_active(bool&, bool&);
 };
 
+extern int get_socket_error(int fd);
+
 #endif


@@ -199,7 +199,7 @@ int SCHEDULER_OP::start_rpc() {
         );
     }
 
-    scope_messages.printf_file(SCHED_OP_REQUEST_FILE, "SCHEDULER_OP::start_rpc(): request xml: ");
+    scope_messages.printf_file(SCHED_OP_REQUEST_FILE, "req:");
 
     if (gstate.use_http_proxy) {
         http_op.use_http_proxy = true;


@@ -127,7 +127,7 @@ int insert_after(char* buffer, char* after, char* text) {
 // it to a client
 //
 int insert_wu_tags(WORKUNIT& wu, APP& app) {
-    char buf[256];
+    char buf[MAX_BLOB_SIZE];
 
     sprintf(buf,
         "    <rsc_fpops_est>%f</rsc_fpops_est>\n"

@@ -186,7 +186,7 @@ int insert_app_file_tags(APP_VERSION& av, USER& user) {
     vector<APP_FILE> app_files;
     APP_FILE af;
     unsigned int i;
-    char buf[256], name[256];
+    char buf[1024], name[256];
     int retval;
 
     parse_project_prefs(user.project_prefs, app_files);

@@ -660,14 +660,14 @@ int send_work(
     WORKUNIT wu;
     DB_RESULT result, result_copy;
 
-    if (sreq.work_req_seconds <= 0) return 0;
-
     log_messages.printf(
         SchedMessages::NORMAL,
         "[HOST#%d] got request for %d seconds of work\n",
         reply.host.id, sreq.work_req_seconds
     );
+
+    if (sreq.work_req_seconds <= 0) return 0;
 
     seconds_to_fill = sreq.work_req_seconds;
     if (seconds_to_fill > MAX_SECONDS_TO_SEND) {
         seconds_to_fill = MAX_SECONDS_TO_SEND;

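The insert_wu_tags() change above is the overflow fix itself: the tag text can exceed 256 bytes, and sprintf() writes past the end without complaint, so the buffer is resized to MAX_BLOB_SIZE (the scheduler's blob-field size). A defensive variant would also detect future growth instead of overflowing or truncating; a sketch, with format_wu_tags and the single tag shown being illustrative:

    #include <cstddef>
    #include <cstdio>

    // snprintf() returns the length the output would have had, so one
    // check catches any tag growth that no longer fits the buffer.
    int format_wu_tags(char* buf, size_t bufsize, double fpops_est) {
        int n = snprintf(buf, bufsize,
            "    <rsc_fpops_est>%f</rsc_fpops_est>\n",
            fpops_est
        );
        if (n < 0 || (size_t)n >= bufsize) {
            return -1;   // would not fit: report instead of corrupting memory
        }
        return 0;
    }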

@@ -610,6 +610,11 @@ def run_check_all():
     all_projects.start_progress_meter()
     all_projects.run_init_wait()
     if os.environ.get('TEST_STOP_BEFORE_HOST_RUN'):
+        verbose_echo(1, 'stopped')
+        # wait instead of killing backend procs.
+        # (Is there a better way to do this?)
+        while (1):
+            time.sleep(1)
         raise SystemExit, 'Stopped due to $TEST_STOP_BEFORE_HOST_RUN'
     # all_hosts.run(asynch=True)
     all_hosts.run()