s3: do some retries in ListBucket, to work around Amazon-vs-Go's HTTP/TLS bug

Change-Id: Idfc8187d07f558f2e7c590dc6c14c66791f46e57
Brad Fitzpatrick 2014-03-17 22:59:02 -07:00
parent bfbe29fc4a
commit bfde349851
2 changed files with 46 additions and 10 deletions
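In essence, the change wraps each signed ListBucket request in a small retry loop with a linear backoff, because the failure mode of Go issue 3514 (the server closing an idle keep-alive https connection that the transport then trips over) surfaces as an error from the client's Do call. A minimal, self-contained sketch of that pattern, with hypothetical package and helper names rather than the exact code from this commit:

package s3retry // hypothetical name, not part of the commit

import (
    "net/http"
    "time"
)

// doWithRetries retries an idempotent request up to tries times,
// sleeping a little longer before each attempt, to paper over
// keep-alive connections the server closed underneath us.
func doWithRetries(c *http.Client, newReq func() *http.Request, tries int) (res *http.Response, err error) {
    for try := 1; try <= tries; try++ {
        time.Sleep(time.Duration(try-1) * 100 * time.Millisecond)
        res, err = c.Do(newReq())
        if err == nil {
            return res, nil
        }
    }
    return nil, err
}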


@@ -51,6 +51,7 @@ func TestNextStr(t *testing.T) {
{"abc", "abd"},
{"ab\xff", "ac\x00"},
{"a\xff\xff", "b\x00\x00"},
{"sha1-da39a3ee5e6b4b0d3255bfef95601890afd80709", "sha1-da39a3ee5e6b4b0d3255bfef95601890afd8070:"},
}
for _, tt := range tests {
if got := nextStr(tt.s); got != tt.want {
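These test cases pin down what nextStr does: it returns the next string of the same length in byte order, incrementing the final byte and carrying past 0xff (so '9' becomes ':' in the sha1 case). A sketch consistent with the cases above; the package's actual implementation may differ:

// nextStr returns the lexicographically next string of the same length.
func nextStr(s string) string {
    if s == "" {
        return s
    }
    b := []byte(s)
    for i := len(b) - 1; i >= 0; i-- {
        if b[i] == 0xff {
            b[i] = 0
            continue // carry into the previous byte
        }
        b[i]++
        break
    }
    return string(b)
}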


@@ -27,10 +27,12 @@ import (
"hash"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strconv"
"time"
"camlistore.org/pkg/httputil"
)
@@ -145,6 +147,9 @@ type Item struct {
type listBucketResults struct {
Contents []*Item
IsTruncated bool
MaxKeys int
Name string // bucket name
Marker string
}
// ListBucket returns 0 to maxKeys (inclusive) items from the provided
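The MaxKeys, Name, and Marker fields added here let ListBucket verify that Amazon echoed the request parameters back, which is how mis-parsed responses get caught further down. A hedged sketch of decoding a sample response into such a struct; the sample XML and the Item fields shown are illustrative, not taken from this commit:

package main

import (
    "encoding/xml"
    "fmt"
    "log"
)

// Item mirrors one <Contents> entry; the field set here is illustrative.
type Item struct {
    Key  string
    Size int64
}

type listBucketResults struct {
    Contents    []*Item
    IsTruncated bool
    MaxKeys     int
    Name        string // bucket name
    Marker      string
}

func main() {
    const sample = `<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Name>mybucket</Name>
  <Marker></Marker>
  <MaxKeys>1000</MaxKeys>
  <IsTruncated>false</IsTruncated>
  <Contents><Key>sha1-deadbeef</Key><Size>42</Size></Contents>
</ListBucketResult>`
    var bres listBucketResults
    if err := xml.Unmarshal([]byte(sample), &bres); err != nil {
        log.Fatal(err)
    }
    fmt.Println(bres.Name, bres.MaxKeys, bres.IsTruncated, bres.Contents[0].Key)
}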
@@ -163,18 +168,47 @@ func (c *Client) ListBucket(bucket string, startAt string, maxKeys int) (items [
fetchN = maxList
}
var bres listBucketResults
url_ := fmt.Sprintf("https://%s.%s/?marker=%s&max-keys=%d",
bucket, c.hostname(), url.QueryEscape(marker), fetchN)
req := newReq(url_)
c.Auth.SignRequest(req)
res, err := c.httpClient().Do(req)
if err != nil {
return nil, err
}
err = xml.NewDecoder(res.Body).Decode(&bres)
httputil.CloseBody(res.Body)
if err != nil {
return nil, err
// Retry the enumerate request up to maxTries times, since Amazon
// likes to close https connections a lot, and Go doesn't yet
// recover from that well: https://code.google.com/p/go/issues/detail?id=3514
const maxTries = 5
for try := 1; try <= maxTries; try++ {
time.Sleep(time.Duration(try-1) * 100 * time.Millisecond)
req := newReq(url_)
c.Auth.SignRequest(req)
res, err := c.httpClient().Do(req)
if err != nil {
if try < maxTries {
continue
}
return nil, err
}
if res.StatusCode != 200 {
err = fmt.Errorf("s3.enumerate: status code %v", res.StatusCode)
} else {
bres = listBucketResults{}
var logbuf bytes.Buffer
err = xml.NewDecoder(io.TeeReader(res.Body, &logbuf)).Decode(&bres)
if err != nil {
log.Printf("Error parsing s3 XML response: %v for %q", err, logbuf.Bytes())
} else if bres.MaxKeys != fetchN || bres.Name != bucket || bres.Marker != marker {
err = fmt.Errorf("Unexpected parse from server: %#v from: %s", bres, logbuf.Bytes())
log.Print(err)
}
}
httputil.CloseBody(res.Body)
if err != nil {
if try < maxTries {
continue
}
log.Print(err)
return nil, err
}
break
}
for _, it := range bres.Contents {
if it.Key == marker && it.Key != startAt {
@@ -188,6 +222,7 @@ func (c *Client) ListBucket(bucket string, startAt string, maxKeys int) (items [
marker = it.Key
}
if !bres.IsTruncated {
// log.Printf("Not truncated. so breaking. items = %d; len Contents = %d, url = %s", len(items), len(bres.Contents), url_)
break
}
}
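For context, the code above handles one page of a marker-based enumeration: each item's key becomes the marker for the next request, and the surrounding loop (not visible in this hunk) keeps going until IsTruncated is false or maxKeys items have been collected. A compact sketch of that outer pattern, with a hypothetical fetchPage helper standing in for one signed ListBucket request:

// listAll pages through a bucket listing using a marker cursor.
func listAll(fetchPage func(marker string, n int) (keys []string, truncated bool, err error), maxKeys int) ([]string, error) {
    var items []string
    marker := ""
    for len(items) < maxKeys {
        keys, truncated, err := fetchPage(marker, maxKeys-len(items))
        if err != nil {
            return nil, err
        }
        for _, k := range keys {
            items = append(items, k)
            marker = k // resume after the last key seen
        }
        if !truncated || len(keys) == 0 {
            break
        }
    }
    return items, nil
}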