make camput use schema.WriteFile; add flag to enable rollsum splits

Change-Id: If436f7aa5d8360fbe1aaa07795b326e1eb5066f4
Author: Brad Fitzpatrick
Date: 2011-09-28 11:16:51 -07:00
parent 7f48b1e9a9
commit 2ec0a974b6
3 changed files with 25 additions and 13 deletions


@@ -1 +1 @@
-6g version weekly.2011-09-21 9872
+6g version weekly.2011-09-21 9887


@@ -28,6 +28,7 @@ import (
 	"sort"

 	"camli/blobref"
+	"camli/blobserver/remote"
 	"camli/client"
 	"camli/schema"
 	"camli/jsonsign"
@@ -166,7 +167,7 @@ func (up *Uploader) releaseUploadToken() {
 	<-up.filecapc
 }

-func (up *Uploader) UploadFile(filename string) (respr *client.PutResult, outerr os.Error) {
+func (up *Uploader) UploadFile(filename string, rollSplits bool) (respr *client.PutResult, outerr os.Error) {
 	up.getUploadToken()
 	defer up.releaseUploadToken()

@@ -192,21 +193,30 @@ func (up *Uploader) UploadFile(filename string) (respr *client.PutResult, outerr os.Error) {

 	switch {
 	case fi.IsRegular():
-		// Put the blob of the file itself.  (TODO: smart boundary chunking)
-		// For now we just store it as one range.
-		blobpr, err := up.UploadFileBlob(filename)
+		file, err := os.Open(filename)
 		if err != nil {
 			return nil, err
 		}
-		parts := []schema.BytesPart{{BlobRef: blobpr.BlobRef, Size: uint64(blobpr.Size)}}
-		if blobpr.Size != fi.Size {
-			// TODO: handle races of file changing while reading it
-			// after the stat.
-		}
+		defer file.Close()
+		storage := remote.NewFromClient(up.Client)
 		m["camliType"] = "file"
-		if err = schema.PopulateParts(m, fi.Size, parts); err != nil {
+		schemaWriteFileMap := schema.WriteFileMap
+		if rollSplits {
+			schemaWriteFileMap = schema.WriteFileMapRolling
+		}
+		blobref, err := schemaWriteFileMap(storage, m, io.LimitReader(file, fi.Size))
+		if err != nil {
 			return nil, err
 		}
+		// TODO(bradfitz): taking a PutResult here is kinda
+		// gross. should instead make a blobserver.Storage
+		// wrapper type that can track some of this? or that
+		// updates the client stats directly or something.
+		{
+			json, _ := schema.MapToCamliJson(m)
+			pr := &client.PutResult{BlobRef: blobref, Size: int64(len(json)), Skipped: false}
+			return pr, nil
+		}
 	case fi.IsSymlink():
 		if err = schema.PopulateSymlinkMap(m, filename); err != nil {
 			return nil, err
@@ -248,7 +258,7 @@ func (up *Uploader) UploadFile(filename string) (respr *client.PutResult, outerr os.Error) {
 	for _, name := range dirNames {
 		rate <- true
 		go func(dirEntName string) {
-			pr, err := up.UploadFile(filename + "/" + dirEntName)
+			pr, err := up.UploadFile(filename + "/" + dirEntName, rollSplits)
 			if pr == nil && err == nil {
 				log.Fatalf("nil/nil from up.UploadFile on %q", filename+"/"+dirEntName)
 			}
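What the new rolling mode buys: schema.WriteFileMapRolling cuts the file at content-defined boundaries chosen by a rolling checksum over a small sliding window, so inserting or deleting bytes early in a file only changes the chunks near the edit instead of shifting every fixed-offset block. Below is a minimal standalone sketch of that splitting idea in present-day Go; the window size, hash base, and boundary mask are illustrative assumptions, not Camlistore's actual rollsum parameters.

// rollsplit.go: content-defined chunking with a rolling hash (sketch only;
// parameters are assumptions, not Camlistore's rollsum values).
package main

import (
	"fmt"
	"math/rand"
)

const (
	window = 64            // bytes covered by the rolling hash (assumed)
	base   = 31            // polynomial hash base (assumed)
	mask   = (1 << 13) - 1 // boundary test; ~8 KiB average chunks on random data (assumed)
)

// splitRolling returns the chunk sizes a rolling-hash splitter would
// produce for data, cutting wherever the hash of the trailing window
// has its low 13 bits all set.
func splitRolling(data []byte) []int {
	pow := uint32(1) // base^window, used to drop the byte leaving the window
	for i := 0; i < window; i++ {
		pow *= base
	}
	var (
		h     uint32
		sizes []int
		start int
	)
	for i, b := range data {
		h = h*base + uint32(b) // slide the new byte in
		if i >= window {
			h -= uint32(data[i-window]) * pow // slide the oldest byte out
		}
		if h&mask == mask {
			sizes = append(sizes, i+1-start)
			start = i + 1
		}
	}
	if start < len(data) {
		sizes = append(sizes, len(data)-start) // trailing partial chunk
	}
	return sizes
}

func main() {
	data := make([]byte, 1<<20)
	rand.Read(data) // pseudo-random demo input
	sizes := splitRolling(data)
	fmt.Printf("%d chunks; first: %d bytes\n", len(sizes), sizes[0])
}

Because a boundary depends only on the last few dozen bytes seen, the splitter re-synchronizes within one window after an edit, which is what makes re-uploading modified files cheap. Real implementations also enforce minimum and maximum chunk sizes, omitted here for brevity.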


@@ -31,6 +31,7 @@ type fileCmd struct {
 	tag string

 	makePermanode bool
+	rollSplits    bool

 	havecache, statcache bool
 }
@@ -44,6 +45,7 @@ func init() {
 	flags.BoolVar(&cmd.statcache, "statcache", false, "Use the stat cache, assuming unchanged files already uploaded in the past are still there. Fast, but potentially dangerous.")
 	flags.BoolVar(&cmd.havecache, "havecache", false, "Use the 'have cache', a cache keeping track of what blobs the remote server should already have from previous uploads.")
+	flags.BoolVar(&cmd.rollSplits, "rolling", false, "Use rolling checksum file splits.")
 	flagCacheLog = flags.Bool("logcache", false, "log caching details")
@@ -100,7 +102,7 @@ func (c *fileCmd) RunCommand(up *Uploader, args []string) os.Error {
 	}

 	for _, arg := range args {
-		lastPut, err = up.UploadFile(arg)
+		lastPut, err = up.UploadFile(arg, c.rollSplits)
 		handleResult("file", lastPut, err)

 		if permaNode != nil {
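With the flag plumbed from fileCmd through UploadFile, rolling splits are opt-in per upload. An invocation would look something like this (assuming a built and configured camput client; the path is a placeholder):

	camput file -rolling /path/to/bigfile

Leaving -rolling off keeps the default schema.WriteFileMap behavior, so existing callers and scripts see no change.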