From 97978b56d4f4ff679d6998095dafb7e5eabfff41 Mon Sep 17 00:00:00 2001
From: Brad Fitzpatrick
Date: Wed, 8 Jun 2011 18:56:56 -0700
Subject: [PATCH] don't retry forever indexing files.

---
 lib/go/camli/mysqlindexer/receive.go | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/lib/go/camli/mysqlindexer/receive.go b/lib/go/camli/mysqlindexer/receive.go
index 499a6c88d..a61a93494 100644
--- a/lib/go/camli/mysqlindexer/receive.go
+++ b/lib/go/camli/mysqlindexer/receive.go
@@ -221,7 +221,14 @@ func (mi *Indexer) populateFile(client *mysql.Client, blobRef *blobref.BlobRef,
 	fr := ss.NewFileReader(seekFetcher)
 	n, err := io.Copy(sha1, fr)
 	if err != nil {
-		return err
+		// TODO: job scheduling system to retry this spaced
+		// out max n times. Right now our options are
+		// ignoring this error (forever) or returning the
+		// error and making the indexing try again (likely
+		// forever failing). Both options suck. For now just
+		// log and act like all's okay.
+		log.Printf("mysqlindex: error indexing file %s: %v", blobRef, err)
+		return nil
 	}
 	log.Printf("file %s blobref is %s, size %d", blobRef, blobref.FromHash("sha1", sha1), n)
 	err = execSQL(client,