From 76a3468b543e79fb2350547e4b3675fc32303d50 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Sun, 2 Mar 2003 19:24:09 +0000 Subject: [PATCH] garbage collect app versions; bandwidth limit interface svn path=/trunk/boinc/; revision=997 --- checkin_notes | 20 ++++++++ client/client_state.C | 68 ++++++++++++++++++++------ client/client_types.h | 1 + client/cs_files.C | 13 ++--- client/hostinfo_unix.C | 4 +- client/http.C | 107 ++++++++--------------------------------- client/http.h | 2 - client/main.C | 6 --- html/user/prefs.inc | 50 ++++++++++++++----- sched/handle_request.C | 8 +-- todo | 11 ++--- 11 files changed, 148 insertions(+), 142 deletions(-) diff --git a/checkin_notes b/checkin_notes index 3f7b643886..95eb358bbd 100755 --- a/checkin_notes +++ b/checkin_notes @@ -3569,3 +3569,23 @@ David Feb 28 2003 client/ client_types.C,h + +David Mar 2 2003 + - added code to garbage-collect APP_VERSIONs: + delete any APP_VERSION that's not referenced and for which + there's a later version of the same app. + Also garbage-collect executable files + (previously, they were never deleted). + - fixed minor bug (file offset on redirected POST2 HTTP op) + - added network bandwidth limits to global prefs web interface + + todo + client/ + client_state.C,h + cs_files.C + hostinfo_unix.C + http.C,h + html_user/ + prefs.inc + sched/ + handle_request.C diff --git a/client/client_state.C b/client/client_state.C index fab8472650..860b193019 100644 --- a/client/client_state.C +++ b/client/client_state.C @@ -575,9 +575,7 @@ int CLIENT_STATE::parse_state_file() { // could put logic here to detect incompatible state files // after core client update } else if (match_tag(buf, "")) { - // TODO: handle old client state file if different version } else if (match_tag(buf, "")) { - // TODO: handle old client state file if different version } else if (match_tag(buf, "")) { global_prefs.confirm_before_connecting = true; } else if (match_tag(buf, "")) { @@ -590,8 +588,6 @@ int CLIENT_STATE::parse_state_file() { } else if (parse_str(buf, "", socks_user_passwd, sizeof(socks_user_passwd))) { } else { fprintf(stderr, "CLIENT_STATE::parse_state_file: unrecognized: %s\n", buf); - retval = ERR_XML_PARSE; - goto done; } } done: @@ -678,8 +674,8 @@ int CLIENT_STATE::write_state_file() { return 0; } -// TODO: write no more often than X seconds // Write the client_state.xml file if necessary +// TODO: write no more often than X seconds // int CLIENT_STATE::write_state_file_if_needed() { int retval; @@ -937,18 +933,20 @@ void CLIENT_STATE::print_summary() { // delete unneeded records and files // bool CLIENT_STATE::garbage_collect() { - unsigned int i; + unsigned int i, j; int failnum; FILE_INFO* fip; RESULT* rp; WORKUNIT* wup; + APP_VERSION* avp, *avp2; vector::iterator result_iter; vector::iterator wu_iter; vector::iterator fi_iter; - bool action = false; + vector::iterator avp_iter; + bool action = false, found; - // zero references counts on WUs and FILE_INFOs + // zero references counts on WUs, FILE_INFOs and APP_VERSIONs for (i=0; iref_cnt = 0; @@ -957,6 +955,10 @@ bool CLIENT_STATE::garbage_collect() { fip = file_infos[i]; fip->ref_cnt = 0; } + for (i=0; iref_cnt = 0; + } // delete RESULTs that have been finished and reported; // reference-count files referred to by other results @@ -1010,7 +1012,7 @@ bool CLIENT_STATE::garbage_collect() { } // delete WORKUNITs not referenced by any result; - // reference-count files referred to by other WUs + // reference-count files and APP_VERSIONs referred to by other WUs // wu_iter = 
workunits.begin(); while (wu_iter != workunits.end()) { @@ -1026,18 +1028,56 @@ bool CLIENT_STATE::garbage_collect() { for (i=0; iinput_files.size(); i++) { wup->input_files[i].file_info->ref_cnt++; } + wup->avp->ref_cnt++; wu_iter++; } } - // delete FILE_INFOs (and corresponding files) - // that are not referenced by any WORKUNIT or RESULT, - // and are not sticky. + // go through APP_VERSIONs; + // delete any not referenced by any WORKUNIT + // and having a more recent version. + // + avp_iter = app_versions.begin(); + while (avp_iter != app_versions.end()) { + avp = *avp_iter; + if (avp->ref_cnt == 0) { + found = false; + for (j=0; japp==avp->app && avp2->version_num>avp->version_num) { + found = true; + break; + } + } + if (found) { + delete avp; + avp_iter = app_versions.erase(avp_iter); + action = true; + } else { + avp_iter++; + } + } else { + avp_iter++; + } + } + + // Then go through remaining APP_VERSIONs, + // bumping refcnt of associated files. + // + for (i=0; iapp_files.size(); j++) { + avp->app_files[i].file_info->ref_cnt++; + } + } + + // delete FILE_INFOs (and corresponding files) that are not sticky + // and are not referenced by any WORKUNIT, RESULT or APP_VERSION // fi_iter = file_infos.begin(); while (fi_iter != file_infos.end()) { fip = *fi_iter; - if (fip->ref_cnt==0 && !fip->sticky && !fip->executable) { + if (fip->ref_cnt==0 && !fip->sticky) { fip->delete_file(); if (log_flags.state_debug) printf("deleting file %s\n", fip->name); delete fip; @@ -1048,8 +1088,6 @@ bool CLIENT_STATE::garbage_collect() { } } - // TODO: delete obsolete APP_VERSIONs - if (action && log_flags.state_debug) { print_summary(); } diff --git a/client/client_types.h b/client/client_types.h index 05911b3f97..6ac59e8f16 100644 --- a/client/client_types.h +++ b/client/client_types.h @@ -170,6 +170,7 @@ struct APP_VERSION { APP* app; PROJECT* project; vector app_files; + int ref_cnt; int parse(FILE*); int write(FILE*); diff --git a/client/cs_files.C b/client/cs_files.C index 9c10eeee80..edc20b2d36 100644 --- a/client/cs_files.C +++ b/client/cs_files.C @@ -39,16 +39,15 @@ #include "client_state.h" #include "error_numbers.h" -// Decide whether to start a new file transfer +// Decide whether to consider starting a new file transfer +// TODO: limit the number of file xfers in some way // bool CLIENT_STATE::start_new_file_xfer() { - // TODO: limit the number of file xfers in some way return true; } -// Make a directory for each of the projects present -// in the client state +// Make a directory for each of the projects in the client state // int CLIENT_STATE::make_project_dirs() { unsigned int i; @@ -102,7 +101,6 @@ bool CLIENT_STATE::handle_pers_file_xfers() { FILE_INFO* fip; PERS_FILE_XFER *pfx; bool action = false; - int retval; // Look for FILE_INFOs for which we should start a transfer, // and make PERS_FILE_XFERs for them @@ -115,15 +113,14 @@ bool CLIENT_STATE::handle_pers_file_xfers() { pfx = new PERS_FILE_XFER; pfx->init(fip, false); fip->pers_file_xfer = pfx; - retval = pers_xfers->insert(fip->pers_file_xfer); - // TODO: return error? 
+ pers_xfers->insert(fip->pers_file_xfer); action = true; } else if (fip->upload_when_present && fip->status == FILE_PRESENT && !fip->uploaded) { pfx = new PERS_FILE_XFER; pfx->init(fip, true); fip->pers_file_xfer = pfx; - retval = pers_xfers->insert(fip->pers_file_xfer); + pers_xfers->insert(fip->pers_file_xfer); action = true; } } diff --git a/client/hostinfo_unix.C b/client/hostinfo_unix.C index 1e419293a8..662920b830 100644 --- a/client/hostinfo_unix.C +++ b/client/hostinfo_unix.C @@ -94,7 +94,6 @@ char* ip_addr_string(int ip_addr) { // int get_timezone( void ) { tzset(); - // TODO: get this to work on all platforms // TODO: take daylight savings time into account #ifdef HAVE_GMTOFF time_t cur_time; @@ -136,7 +135,8 @@ bool host_is_running_on_batteries() { #ifdef linux -// Determine the memory specifications for this host, including RAM and swap space +// Determine the memory sizes for this host, +// including RAM and swap space // void parse_meminfo(HOST_INFO& host) { char buf[256]; diff --git a/client/http.C b/client/http.C index 2f01bba15a..8ca06b6e6e 100644 --- a/client/http.C +++ b/client/http.C @@ -41,9 +41,7 @@ #define HTTP_BLOCKSIZE 16384 -// Breaks a url down into its server and file path components -// TODO: deal with alternate protocols (ftp, gopher, etc) or disallow -// them and parse accordingly +// Breaks a HTTP url down into its server and file path components // void parse_url(char* url, char* host, int &port, char* file) { char* p; @@ -121,42 +119,6 @@ static void http_post_request_header( ); } -#if 0 -// Do we still need this? -// -void http_put_request_header( - char* buf, char* host, char* file, int size, int offset -) { - if (offset) { - sprintf(buf, - "PUT /%s HTTP/1.1\015\012" - "Pragma: no-cache\015\012" - "Cache-Control: no-cache\015\012" - "Host: %s:80\015\012" - "Range: bytes=%d-\015\012" - "Connection: close\015\012" - "Content-Type: application/octet-stream\015\012" - "Content-Length: %d\015\012" - "\015\012", - file, host, offset, size - ); - } else { - sprintf(buf, - "PUT /%s HTTP/1.1\015\012" - "Pragma: no-cache\015\012" - "Cache-Control: no-cache\015\012" - "Host: %s:80\015\012" - "Connection: close\015\012" - "Content-Type: application/octet-stream\015\012" - "Content-Length: %d\015\012" - "\015\012", - file, - host, size - ); - } -} -#endif - // Parse an http reply header into the header struct // int read_http_reply_header(int socket, HTTP_REPLY_HEADER& header) { @@ -191,7 +153,7 @@ int read_http_reply_header(int socket, HTTP_REPLY_HEADER& header) { header.redirect_location[n] = p[n]; n++; } - p[n] = '\0'; + header.redirect_location[n] = 0; } return 0; } @@ -231,7 +193,7 @@ HTTP_OP::HTTP_OP() { HTTP_OP::~HTTP_OP() { } -// Initialize HTTP HEAD operation to url +// Initialize HTTP HEAD operation // int HTTP_OP::init_head(char* url) { char proxy_buf[256]; @@ -248,7 +210,7 @@ int HTTP_OP::init_head(char* url) { return 0; } -// Initialize HTTP GET operation to url +// Initialize HTTP GET operation // int HTTP_OP::init_get(char* url, char* out, bool del_old_file, double off) { char proxy_buf[256]; @@ -271,7 +233,7 @@ int HTTP_OP::init_get(char* url, char* out, bool del_old_file, double off) { return 0; } -// Initialize HTTP POST operation to url +// Initialize HTTP POST operation // int HTTP_OP::init_post(char* url, char* in, char* out) { int retval; @@ -301,7 +263,7 @@ int HTTP_OP::init_post(char* url, char* in, char* out) { return 0; } -// Initialize HTTP POST operation to url including file offset +// Initialize HTTP POST operation // int 
HTTP_OP::init_post2( char* url, char* r1, char* in, double offset @@ -337,26 +299,6 @@ int HTTP_OP::init_post2( return 0; } -#if 0 -// not currently used -int HTTP_OP::init_put(char* url, char* in, int off) { - int retval; - - offset = off; - parse_url(url, hostname, port, filename); - NET_XFER::init(use_http_proxy?proxy_server_name:hostname, use_http_proxy?proxy_server_port:port, HTTP_BLOCKSIZE); - strcpy(infile, in); - retval = file_size(infile, content_length); - if (retval) return retval; - http_op_type = HTTP_OP_PUT; - http_op_state = HTTP_STATE_CONNECTING; - http_put_request_header( - request_header, hostname, filename, content_length, offset - ); - return 0; -} -#endif - // Returns true if the HTTP operation is complete // bool HTTP_OP::http_op_done() { @@ -367,7 +309,7 @@ HTTP_OP_SET::HTTP_OP_SET(NET_XFER_SET* p) { net_xfers = p; } -// Inserts an HTTP_OP into the set +// Adds an HTTP_OP to the set // int HTTP_OP_SET::insert(HTTP_OP* ho) { int retval; @@ -388,8 +330,6 @@ bool HTTP_OP_SET::poll() { htp = http_ops[i]; switch(htp->http_op_state) { case HTTP_STATE_CONNECTING: - // If the op is in the connecting state, and we notice it is done - // connecting, move it to the HTTP_STATE_REQUEST_HEADER state if (htp->is_connected) { htp->http_op_state = HTTP_STATE_REQUEST_HEADER; htp->want_upload = true; @@ -399,7 +339,10 @@ bool HTTP_OP_SET::poll() { case HTTP_STATE_REQUEST_HEADER: if (htp->io_ready) { action = true; - n = send(htp->socket, htp->request_header, strlen(htp->request_header), 0); + n = send( + htp->socket, htp->request_header, + strlen(htp->request_header), 0 + ); if (log_flags.http_debug) { printf( "wrote HTTP header to socket %d: %d bytes\n%s", @@ -409,7 +352,6 @@ bool HTTP_OP_SET::poll() { htp->io_ready = false; switch(htp->http_op_type) { case HTTP_OP_POST: - //case HTTP_OP_PUT: htp->http_op_state = HTTP_STATE_REQUEST_BODY; htp->file = fopen(htp->infile, "rb"); if (!htp->file) { @@ -438,8 +380,7 @@ bool HTTP_OP_SET::poll() { action = true; n = send(htp->socket, htp->req1, strlen(htp->req1), 0); htp->http_op_state = HTTP_STATE_REQUEST_BODY; - // If there's a file we also want to send, then start transferring - // it, otherwise, go on to the next step + if (htp->infile && strlen(htp->infile) > 0) { htp->file = fopen(htp->infile, "rb"); if (!htp->file) { @@ -480,27 +421,28 @@ bool HTTP_OP_SET::poll() { action = true; if (log_flags.http_debug) printf("got reply header; %x io_done %d\n", (unsigned int)htp, htp->io_done); read_http_reply_header(htp->socket, htp->hrh); + // TODO: handle all kinds of redirects here + if (htp->hrh.status == HTTP_STATUS_MOVED_PERM || htp->hrh.status == HTTP_STATUS_MOVED_TEMP) { - // Close the old socket htp->close_socket(); switch (htp->http_op_type) { case HTTP_OP_HEAD: - htp->init_head( htp->hrh.redirect_location ); + htp->init_head(htp->hrh.redirect_location); break; case HTTP_OP_GET: - // *** Not sure if delete_old_file should be true - htp->init_get( htp->hrh.redirect_location, htp->outfile, true ); + htp->init_get(htp->hrh.redirect_location, htp->outfile, false); break; case HTTP_OP_POST: - htp->init_post( htp->hrh.redirect_location, htp->infile, htp->outfile ); + htp->init_post(htp->hrh.redirect_location, htp->infile, htp->outfile); break; case HTTP_OP_POST2: - // TODO: Change offset to correct value - htp->init_post2( htp->hrh.redirect_location, htp->req1, htp->infile, 0 ); + htp->init_post2(htp->hrh.redirect_location, htp->req1, htp->infile, htp->file_offset); break; } + // Open connection to the redirected server + // htp->open_server(); 
break; } @@ -522,10 +464,6 @@ bool HTTP_OP_SET::poll() { case HTTP_OP_GET: htp->http_op_state = HTTP_STATE_REPLY_BODY; - // TODO: - // Append to a file if it already exists, otherwise - // create a new one. init_get should have already - // deleted the file if necessary htp->file = fopen(htp->outfile, "ab"); if (!htp->file) { fprintf(stderr, @@ -544,11 +482,6 @@ bool HTTP_OP_SET::poll() { htp->io_ready = false; htp->io_done = true; break; -#if 0 - case HTTP_OP_PUT: - htp->http_op_state = HTTP_STATE_DONE; - htp->http_op_retval = 0; -#endif } } break; diff --git a/client/http.h b/client/http.h index 8bbec63658..5b093a53a2 100644 --- a/client/http.h +++ b/client/http.h @@ -42,7 +42,6 @@ struct HTTP_REPLY_HEADER { // For the first 4, data source/sink are files #define HTTP_OP_GET 1 #define HTTP_OP_POST 2 -//#define HTTP_OP_PUT 3 #define HTTP_OP_HEAD 4 #define HTTP_OP_POST2 5 // a POST operation where the request comes from a combination @@ -77,7 +76,6 @@ public: int init_post2( char* url, char* req1, char* infile, double offset ); - //int init_put(char* url, char* infile, int offset=0); bool http_op_done(); }; diff --git a/client/main.C b/client/main.C index dabc06667b..86242be186 100644 --- a/client/main.C +++ b/client/main.C @@ -48,7 +48,6 @@ void show_message(PROJECT *p, char* message, char* priority) { // Prompt user for project URL and authenticator, // and create an account file -// TODO: use better input method here, backspace doesn't always seem to work // int add_new_project() { char master_url[256]; @@ -63,11 +62,6 @@ int add_new_project() { ); scanf("%s", authenticator); - // TODO: might be a good idea to verify the account key - // by doing an RPC to a scheduling server. - // But this would require fetching and parsing the master file, - // so to heck with it. 
- write_account_file(master_url, authenticator); return 0; } diff --git a/html/user/prefs.inc b/html/user/prefs.inc index 551c87b857..5832825c46 100644 --- a/html/user/prefs.inc +++ b/html/user/prefs.inc @@ -37,7 +37,7 @@ global $text; global $parse_result; global $in_project_specific; -// the following will parse either global or project prefs +// the following parses either global or project prefs // TODO: split up into separate functions // function element_start($parser, $name, $attrs) { @@ -93,6 +93,12 @@ function element_end($parser, $name) { case "disk_min_free_gb": $parse_result->disk_min_free_gb = $text; break; + case "max_bytes_sec_down": + $parse_result->max_bytes_sec_down = $text; + break; + case "max_bytes_sec_up": + $parse_result->max_bytes_sec_up = $text; + break; case "resource_share": $parse_result->resource_share = $text; break; @@ -135,6 +141,8 @@ function default_prefs() { $p->disk_max_used_gb = 100; $p->disk_max_used_pct = 50; $p->disk_min_free_gb = 1; + $p->max_bytes_sec_down = 0; + $p->max_bytes_sec_up = 0; $p->resource_share = 100; $p->show_email = false; @@ -146,7 +154,7 @@ function default_prefs() { // state of prefs before parsing; initialize all booleans to false // -function initial_prefs() { +function initialize_prefs_before_parsing() { $p = default_prefs(); $p->show_email = false; $p->send_email = false; @@ -161,7 +169,7 @@ function initial_prefs() { function prefs_parse($prefs_xml) { global $parse_result; - $parse_result = initial_prefs(); + $parse_result = initialize_prefs_before_parsing(); $xml_parser = xml_parser_create(); xml_parser_set_option($xml_parser, XML_OPTION_CASE_FOLDING, 0); @@ -173,16 +181,20 @@ function prefs_parse($prefs_xml) { //////////////////////////////////////////// // -// display preference subsets, with Edit buttons +// display preference subsets // function prefs_show_global($prefs) { - row2("Work if computer on batteries:", $prefs->run_on_batteries?"Yes":"No"); - row2("Work if computer in use:", $prefs->run_if_user_active?"Yes":"No"); - row2("Confirm before connecting to network:", $prefs->confirm_before_connecting?"Yes":"No"); + row2("Work if computer on batteries?", $prefs->run_on_batteries?"Yes":"No"); + row2("Work if computer in use?", $prefs->run_if_user_active?"Yes":"No"); + row2("Confirm before connecting to network?", $prefs->confirm_before_connecting?"Yes":"No"); row2("Amount of work to buffer:", "$prefs->work_buf_min_days to $prefs->work_buf_max_days days"); row2("Maximum disk space to use:", "$prefs->disk_max_used_gb GB"); row2("Minimum disk space to leave free:", "$prefs->disk_min_free_gb GB"); row2("Maximum % of disk allowed to used:", "$prefs->disk_max_used_pct %"); + $x = $prefs->max_bytes_sec_down; + row2("Maximum bytes/sec download:", $x?"$x":"No limit"); + $x = $prefs->max_bytes_sec_up; + row2("Maximum bytes/sec upload:", $x?"$x":"No limit"); } function prefs_show_resource($prefs) { @@ -264,15 +276,23 @@ function prefs_form_global($user, $prefs) { Use no more than Gbytes - - Leave at least - Gbytes free - + Leave at least + Gbytes free Use no more than % of total space "; + $d = $prefs->max_bytes_sec_down; + $dt = $d?"$d":""; + $u = $prefs->max_bytes_sec_up; + $ut = $u?"$u":""; + echo " + Maximum bytes/sec download + + Maximum bytes/sec upload + + "; } function prefs_form_email($prefs) { @@ -350,6 +370,8 @@ function prefs_global_parse_form(&$prefs) { $disk_max_used_gb = $_GET["disk_max_used_gb"]; $disk_max_used_pct = $_GET["disk_max_used_pct"]; $disk_min_free_gb = $_GET["disk_min_free_gb"]; + $max_bytes_sec_down = 
$_GET["max_bytes_sec_down"]; + $max_bytes_sec_up = $_GET["max_bytes_sec_up"]; $prefs->run_on_batteries = ($run_on_batteries == "yes"); $prefs->run_if_user_active = ($run_if_user_active == "yes"); @@ -368,6 +390,8 @@ function prefs_global_parse_form(&$prefs) { $prefs->disk_max_used_gb = $disk_max_used_gb; $prefs->disk_max_used_pct = $disk_max_used_pct; $prefs->disk_min_free_gb = $disk_min_free_gb; + $prefs->max_bytes_sec_down = $max_bytes_sec_down; + $prefs->max_bytes_sec_up = $max_bytes_sec_up; } function prefs_resource_parse_form(&$prefs) { @@ -407,7 +431,9 @@ function global_prefs_make_xml($prefs) { $xml = $xml ."$prefs->disk_max_used_gb\n" ."$prefs->disk_max_used_pct\n" - ."$prefs->disk_min_free_gb\n"; + ."$prefs->disk_min_free_gb\n" + ."$prefs->max_bytes_sec_down\n" + ."$prefs->max_bytes_sec_up\n"; $xml = $xml."\n"; return $xml; } diff --git a/sched/handle_request.C b/sched/handle_request.C index 0f05f9fa47..1d429f9bd8 100644 --- a/sched/handle_request.C +++ b/sched/handle_request.C @@ -215,8 +215,9 @@ int authenticate_user(SCHEDULER_REQUEST& sreq, SCHEDULER_REPLY& reply) { retval = db_user_lookup_auth(reply.user); if (retval) { strcpy(reply.message, - "Invalid or missing authenticator. " - "Visit this project's web site to get an authenticator."); + "Invalid or missing account ID. " + "Visit this project's web site to get an account ID." + ); strcpy(reply.message_priority, "low"); reply.request_delay = 120; sprintf(buf, "Bad authenticator: %s\n", sreq.authenticator); @@ -280,7 +281,8 @@ int update_host_record(SCHEDULER_REQUEST& sreq, HOST& host) { host.connected_frac = sreq.host.connected_frac; host.active_frac = sreq.host.active_frac; host.p_ncpus = sreq.host.p_ncpus; - strncpy(host.p_vendor, sreq.host.p_vendor, sizeof(host.p_vendor)); // unlikely this will change + strncpy(host.p_vendor, sreq.host.p_vendor, sizeof(host.p_vendor)); + // unlikely this will change strncpy(host.p_model, sreq.host.p_model, sizeof(host.p_model)); host.p_fpops = sreq.host.p_fpops; host.p_iops = sreq.host.p_iops; diff --git a/todo b/todo index 190fde8f6e..052ddf41b0 100755 --- a/todo +++ b/todo @@ -1,13 +1,10 @@ ----------------------- BUGS (arranged from high to low priority) ----------------------- -- Test suspend/resume functionality on Windows, - no way to suspend/resume on UNIX - "Show Graphics" menu item brings up minimized window, client does not remember window size/pos after close/reopen, window closes and does not reopen when workunit finishes and new workunit starts -- Screensaver "blank screen" functionality not implemented - CPU time updates infrequently (every 10 seconds), should there be a user control for this? 
- Client treats URL "maggie/ap/" different than URL "maggie/ap", @@ -22,6 +19,8 @@ BUGS (arranged from high to low priority) HIGH-PRIORITY (should do for beta test) ----------------------- +- Implement Screensaver "blank screen" functionality + multiple preference sets implement server watchdogs @@ -36,6 +35,8 @@ Messages from core client ----------------------- THINGS TO TEST (preferably with test scripts) ----------------------- +- Test suspend/resume functionality on Windows, + no way to suspend/resume on UNIX - verify that if file xfer is interrupted, it resumes at right place (and progress bar is correct) - result reissue @@ -48,10 +49,6 @@ MEDIUM-PRIORITY (should do before public release) make get_local_ip_addr() work in all cases -implement max bytes/sec network preferences -implement bandwidth limiting - current code assumes 1-second poll loop; wrong - Implement FIFO mechanism in scheduler for results that can't be sent user profiles on web (borrow logic from SETI@home)