Add keys to composite literal fields to make go vet happy

Change-Id: I28e38da6f5499c3284e647b1c123bcfc882120f7
Stephen Searles 2016-01-07 18:20:50 -08:00
parent ea4b5e477a
commit 23457fb56a
9 changed files with 22 additions and 22 deletions
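
go vet's composites check reports "composite literal uses unkeyed fields" when a struct from another package is constructed positionally, because such a literal silently changes meaning if the upstream package reorders or adds fields. That is the warning this change addresses: every blob.SizedRef{br, size}-style literal below gains explicit field names. The snippet that follows is a self-contained illustration of the two forms, not code from this repository; SizedRef here is a local stand-in for blob.SizedRef (vet only flags literals of imported types, so this single-file sketch just shows the shapes involved).

package main

import "fmt"

// SizedRef stands in for a struct defined in another package,
// in the spirit of camlistore's blob.SizedRef.
type SizedRef struct {
	Ref  string
	Size uint32
}

func main() {
	// Unkeyed literal: depends on field order. For types imported from
	// another package, go vet reports "composite literal uses unkeyed fields".
	unkeyed := SizedRef{"sha1-deadbeef", 42}

	// Keyed literal: the form this commit switches to. It stays correct
	// even if fields are later added or reordered upstream.
	keyed := SizedRef{Ref: "sha1-deadbeef", Size: 42}

	fmt.Println(unkeyed == keyed) // true: both describe the same value
}

Running go vet with the composites check enabled is what surfaces these literals; the hunks below apply the keyed form file by file.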

@@ -885,9 +885,9 @@ func (pk *packer) writeAZip(trunc blob.Ref) (err error) {
var dataOffset int64
for _, br := range dataRefsWritten {
size := pk.dataSize[br]
-mf.DataBlobs = append(mf.DataBlobs, BlobAndPos{blob.SizedRef{br, size}, dataOffset})
+mf.DataBlobs = append(mf.DataBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: size}, dataOffset})
-zipBlobs = append(zipBlobs, BlobAndPos{blob.SizedRef{br, size}, dataStart + dataOffset})
+zipBlobs = append(zipBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: size}, dataStart + dataOffset})
dataOffset += int64(size)
}
@@ -899,7 +899,7 @@ func (pk *packer) writeAZip(trunc blob.Ref) (err error) {
check(err)
check(zw.Flush())
b := pk.schemaBlob[br]
-zipBlobs = append(zipBlobs, BlobAndPos{blob.SizedRef{br, b.Size()}, cw.n})
+zipBlobs = append(zipBlobs, BlobAndPos{blob.SizedRef{Ref: br, Size: b.Size()}, cw.n})
rc := b.Open()
n, err := io.Copy(fw, rc)
rc.Close()
@@ -1027,7 +1027,7 @@ func (s *storage) foreachZipBlob(zipRef blob.Ref, fn func(BlobAndPos) error) err
return err
}
if err := fn(BlobAndPos{
-SizedRef: blob.SizedRef{br, uint32(f.UncompressedSize64)},
+SizedRef: blob.SizedRef{Ref: br, Size: uint32(f.UncompressedSize64)},
Offset: off,
}); err != nil {
return err

@@ -234,7 +234,7 @@ func (s *storage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
if err != nil {
continue
}
-dest <- blob.SizedRef{br, plainSize}
+dest <- blob.SizedRef{Ref: br, Size: plainSize}
}
return nil
}
@@ -276,7 +276,7 @@ func (s *storage) ReceiveBlob(plainBR blob.Ref, source io.Reader) (sb blob.Sized
return sb, fmt.Errorf("encrypt: error updating index for encrypted %v (plaintext %v): %v", encBR, plainBR, err)
}
-return blob.SizedRef{plainBR, uint32(plainSize)}, nil
+return blob.SizedRef{Ref: plainBR, Size: uint32(plainSize)}, nil
}
func (s *storage) Fetch(plainBR blob.Ref) (file io.ReadCloser, size uint32, err error) {
@@ -346,7 +346,7 @@ func (s *storage) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef,
panic("Bogus encrypt index value: " + iter.Value())
}
select {
-case dest <- blob.SizedRef{br, plainSize}:
+case dest <- blob.SizedRef{Ref: br, Size: plainSize}:
case <-ctx.Done():
return ctx.Err()
}

@@ -128,7 +128,7 @@ func (sto *sto) touchBlob(sb blob.SizedRef) {
func (sto *sto) Fetch(b blob.Ref) (rc io.ReadCloser, size uint32, err error) {
rc, size, err = sto.cache.Fetch(b)
if err == nil {
-sto.touchBlob(blob.SizedRef{b, size})
+sto.touchBlob(blob.SizedRef{Ref: b, Size: size})
return
}
if err != os.ErrNotExist {
@@ -147,7 +147,7 @@ func (sto *sto) Fetch(b blob.Ref) (rc io.ReadCloser, size uint32, err error) {
log.Printf("populating proxycache cache for %v: %v", b, err)
return
}
-sto.touchBlob(blob.SizedRef{b, size})
+sto.touchBlob(blob.SizedRef{Ref: b, Size: size})
}()
return ioutil.NopCloser(bytes.NewReader(all)), size, nil
}

@@ -54,7 +54,7 @@ func (ix *Index) EnumerateBlobs(ctx context.Context, dest chan<- blob.SizedRef,
size, err := parseHaveVal(it.Value())
if err == nil {
select {
-case dest <- blob.SizedRef{br, uint32(size)}:
+case dest <- blob.SizedRef{Ref: br, Size: uint32(size)}:
case <-ctx.Done():
return ctx.Err()
}
@@ -77,7 +77,7 @@ func (ix *Index) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref) error {
if err != nil {
return fmt.Errorf("invalid size for key %q = %q", key, v)
}
-dest <- blob.SizedRef{br, uint32(size)}
+dest <- blob.SizedRef{Ref: br, Size: uint32(size)}
}
return nil
}

@@ -201,7 +201,7 @@ func (ix *Index) ReceiveBlob(blobRef blob.Ref, source io.Reader) (retsb blob.Siz
}
if haveVal, haveErr := ix.s.Get("have:" + blobRef.String()); haveErr == nil {
if strings.HasSuffix(haveVal, "|indexed") {
-return blob.SizedRef{blobRef, uint32(written)}, nil
+return blob.SizedRef{Ref: blobRef, Size: uint32(written)}, nil
}
}
@@ -233,7 +233,7 @@ func (ix *Index) ReceiveBlob(blobRef blob.Ref, source io.Reader) (retsb blob.Siz
// successfully recorded that the blob isn't
// indexed, but we'll reindex it later once
// the dependent blobs arrive.
-return blob.SizedRef{blobRef, uint32(written)}, nil
+return blob.SizedRef{Ref: blobRef, Size: uint32(written)}, nil
}
return
}
@@ -255,7 +255,7 @@ func (ix *Index) ReceiveBlob(blobRef blob.Ref, source io.Reader) (retsb blob.Siz
// mimeType := sniffer.MIMEType()
// log.Printf("indexer: received %s; type=%v; truncated=%v", blobRef, mimeType, sniffer.IsTruncated())
-return blob.SizedRef{blobRef, uint32(written)}, nil
+return blob.SizedRef{Ref: blobRef, Size: uint32(written)}, nil
}
// commit writes the contents of the mutationMap on a batch

@@ -34,7 +34,7 @@ func Mksocket(path string) error {
dir := filepath.Dir(path)
base := filepath.Base(path)
tmp := filepath.Join(dir, "."+base)
-l, err := net.ListenUnix("unix", &net.UnixAddr{tmp, "unix"})
+l, err := net.ListenUnix("unix", &net.UnixAddr{Name: tmp, Net: "unix"})
if err != nil {
return err
}

@@ -1,3 +1,5 @@
+// +build cgo
/*
Copyright 2015 The Camlistore Authors
@@ -14,8 +16,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// +build cgo
package server
func init() {
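
The two hunks directly above move the // +build cgo constraint from just above the package clause to the very top of the file. Build constraints of this form must appear near the top of the file, preceded only by blank lines and other line comments, and be followed by a blank line so they are not read as package documentation; sitting below the block-comment license header, the old placement is presumably what vet's buildtag check objected to. A minimal layout that satisfies those rules, sketched here rather than copied from the real file, looks like:

// +build cgo

/*
Copyright 2015 The Camlistore Authors
(license text elided in this sketch)
*/

package server

Newer Go releases express the same constraint as //go:build cgo, which gofmt keeps in sync with the older comment form.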

@@ -447,7 +447,7 @@ func (sh *SyncHandler) enumeratePendingBlobs(dst chan<- blob.SizedRef, intr <-ch
}
toSend = make([]blob.SizedRef, 0, n)
for br, size := range sh.needCopy {
-toSend = append(toSend, blob.SizedRef{br, size})
+toSend = append(toSend, blob.SizedRef{Ref: br, Size: size})
if len(toSend) == n {
break
}
@@ -477,7 +477,7 @@ func (sh *SyncHandler) enumerateQueuedBlobs(dst chan<- blob.SizedRef, intr <-cha
continue
}
select {
-case dst <- blob.SizedRef{br, uint32(size)}:
+case dst <- blob.SizedRef{Ref: br, Size: uint32(size)}:
case <-intr:
return it.Close()
}
@@ -603,7 +603,7 @@ func (sh *SyncHandler) ReceiveBlob(br blob.Ref, r io.Reader) (sb blob.SizedRef,
if err != nil {
return
}
-sb = blob.SizedRef{br, uint32(n)}
+sb = blob.SizedRef{Ref: br, Size: uint32(n)}
return sb, sh.enqueue(sb)
}

@@ -374,9 +374,9 @@ func (hl *handlerLoader) setupHandler(prefix string) {
if h.internal {
wrappedHandler = unauthorizedHandler{}
} else {
-wrappedHandler = &httputil.PrefixHandler{prefix, hh}
+wrappedHandler = &httputil.PrefixHandler{Prefix: prefix, Handler: hh}
if handlerTypeWantsAuth(h.htype) {
-wrappedHandler = auth.Handler{wrappedHandler}
+wrappedHandler = auth.Handler{Handler: wrappedHandler}
}
}
hl.installer.Handle(prefix, wrappedHandler)