2011-05-26 23:46:27 +00:00
|
|
|
/*
|
|
|
|
Copyright 2011 Google Inc.
|
|
|
|
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
you may not use this file except in compliance with the License.
|
|
|
|
You may obtain a copy of the License at
|
|
|
|
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
See the License for the specific language governing permissions and
|
|
|
|
limitations under the License.
|
|
|
|
*/
|
|
|
|
|
|
|
|
package schema
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2011-06-06 20:30:54 +00:00
|
|
|
"io"
|
2011-05-31 17:20:28 +00:00
|
|
|
"json"
|
|
|
|
"log"
|
2011-05-26 23:46:27 +00:00
|
|
|
"os"
|
|
|
|
|
|
|
|
"camli/blobref"
|
|
|
|
)
|
|
|
|
|
2011-05-31 17:35:45 +00:00
|
|
|
// Keep the "log" import alive for ad-hoc debugging.
var _ = log.Printf

// closedIndex is the sentinel value stored in FileReader.ci to mark a
// closed reader.
const closedIndex = -1

// errClosed is returned by FileReader operations after Close.
var errClosed = os.NewError("filereader is closed")
|
|
|
|
|
2011-07-17 15:50:55 +00:00
|
|
|
// A DirReader reads the entries of a "directory" schema blob's
// referenced "static-set" blob.
type DirReader struct {
	fetcher blobref.SeekFetcher
	ss      *Superset // the decoded "directory" schema blob

	staticSet []*blobref.BlobRef // lazily populated by StaticSet
	current   int                // index of the next entry Readdir returns
}
|
|
|
|
|
2011-07-19 02:06:56 +00:00
|
|
|
// NewDirReader creates a new directory reader and prepares to
|
2011-07-17 15:50:55 +00:00
|
|
|
// fetch the static-set entries
|
|
|
|
func NewDirReader(fetcher blobref.SeekFetcher, dirBlobRef *blobref.BlobRef) (*DirReader, os.Error) {
|
|
|
|
ss := new(Superset)
|
|
|
|
err := ss.setFromBlobRef(fetcher, dirBlobRef)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if ss.Type != "directory" {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: expected \"directory\" schema blob for %s, got %q", dirBlobRef, ss.Type)
|
|
|
|
}
|
|
|
|
dr, err := ss.NewDirReader(fetcher)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: creating DirReader for %s: %v", dirBlobRef, err)
|
|
|
|
}
|
|
|
|
dr.current = 0
|
|
|
|
return dr, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ss *Superset) NewDirReader(fetcher blobref.SeekFetcher) (*DirReader, os.Error) {
|
|
|
|
if ss.Type != "directory" {
|
|
|
|
return nil, fmt.Errorf("Superset not of type \"directory\"")
|
|
|
|
}
|
|
|
|
return &DirReader{fetcher: fetcher, ss: ss}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ss *Superset) setFromBlobRef(fetcher blobref.SeekFetcher, blobRef *blobref.BlobRef) os.Error {
|
|
|
|
if blobRef == nil {
|
|
|
|
return os.NewError("schema/filereader: blobref was nil")
|
|
|
|
}
|
|
|
|
ss.BlobRef = blobRef
|
|
|
|
rsc, _, err := fetcher.Fetch(blobRef)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("schema/filereader: fetching schema blob %s: %v", blobRef, err)
|
|
|
|
}
|
|
|
|
if err = json.NewDecoder(rsc).Decode(ss); err != nil {
|
|
|
|
return fmt.Errorf("schema/filereader: decoding schema blob %s: %v", blobRef, err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// StaticSet returns the whole of the static set members of that directory
|
|
|
|
func (dr *DirReader) StaticSet() ([]*blobref.BlobRef, os.Error) {
|
|
|
|
if dr.staticSet != nil {
|
|
|
|
return dr.staticSet, nil
|
|
|
|
}
|
|
|
|
staticSetBlobref := blobref.Parse(dr.ss.Entries)
|
|
|
|
if staticSetBlobref == nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: Invalid blobref\n")
|
|
|
|
}
|
|
|
|
rsc, _, err := dr.fetcher.Fetch(staticSetBlobref)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: fetching schema blob %s: %v", staticSetBlobref, err)
|
|
|
|
}
|
|
|
|
ss := new(Superset)
|
|
|
|
if err = json.NewDecoder(rsc).Decode(ss); err != nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: decoding schema blob %s: %v", staticSetBlobref, err)
|
|
|
|
}
|
|
|
|
if ss.Type != "static-set" {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: expected \"static-set\" schema blob for %s, got %q", staticSetBlobref, ss.Type)
|
|
|
|
}
|
|
|
|
for _, s := range ss.Members {
|
|
|
|
member := blobref.Parse(s)
|
|
|
|
if member == nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: invalid (static-set member) blobref\n")
|
|
|
|
}
|
|
|
|
dr.staticSet = append(dr.staticSet, member)
|
|
|
|
}
|
|
|
|
return dr.staticSet, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Readdir implements the Directory interface.
|
|
|
|
func (dr *DirReader) Readdir(n int) (entries []DirectoryEntry, err os.Error) {
|
|
|
|
sts, err := dr.StaticSet()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: can't get StaticSet: %v\n", err)
|
|
|
|
}
|
|
|
|
up := dr.current + n
|
|
|
|
if n <= 0 {
|
|
|
|
dr.current = 0
|
|
|
|
up = len(sts)
|
|
|
|
} else {
|
|
|
|
if n > (len(sts) - dr.current) {
|
|
|
|
err = os.EOF
|
|
|
|
up = len(sts)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, entryBref := range sts[dr.current:up] {
|
|
|
|
entry, err := NewDirectoryEntryFromBlobRef(dr.fetcher, entryBref)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: can't create dirEntry: %v\n", err)
|
|
|
|
}
|
|
|
|
entries = append(entries, entry)
|
|
|
|
}
|
|
|
|
return entries, err
|
|
|
|
}
|
|
|
|
|
2011-05-26 23:46:27 +00:00
|
|
|
// A FileReader reads the bytes of a "file" schema blob by walking its
// content parts in order, fetching chunk blobs (or sub-file blobs)
// on demand and caching the most recent reader of each kind.
type FileReader struct {
	fetcher blobref.SeekFetcher
	ss      *Superset // the decoded "file" schema blob

	ci     int    // index into contentparts, or -1 on closed
	ccon   uint64 // bytes into current chunk already consumed
	remain int64  // bytes remaining

	cr   blobref.ReadSeekCloser // cached reader (for blobref chunks)
	crbr *blobref.BlobRef       // the blobref that cr is for

	csubfr *FileReader  // cached sub blobref reader (for subBlobRef chunks)
	ccp    *ContentPart // the content part that csubfr is cached for
}
|
|
|
|
|
2011-05-31 17:20:28 +00:00
|
|
|
// TODO: make this take a blobref.FetcherAt instead?
|
2011-06-04 15:56:03 +00:00
|
|
|
func NewFileReader(fetcher blobref.SeekFetcher, fileBlobRef *blobref.BlobRef) (*FileReader, os.Error) {
|
2011-06-06 20:30:54 +00:00
|
|
|
if fileBlobRef == nil {
|
|
|
|
return nil, os.NewError("schema/filereader: NewFileReader blobref was nil")
|
|
|
|
}
|
2011-05-31 17:20:28 +00:00
|
|
|
ss := new(Superset)
|
|
|
|
rsc, _, err := fetcher.Fetch(fileBlobRef)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err)
|
|
|
|
}
|
|
|
|
if err = json.NewDecoder(rsc).Decode(ss); err != nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: decoding file schema blob: %v", err)
|
|
|
|
}
|
|
|
|
if ss.Type != "file" {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: expected \"file\" schema blob, got %q", ss.Type)
|
|
|
|
}
|
2011-07-13 08:52:37 +00:00
|
|
|
fr, err := ss.NewFileReader(fetcher)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: creating FileReader for %s: %v", fileBlobRef, err)
|
|
|
|
}
|
|
|
|
return fr, nil
|
2011-05-31 17:20:28 +00:00
|
|
|
}
|
|
|
|
|
2011-07-13 08:52:37 +00:00
|
|
|
func (ss *Superset) NewFileReader(fetcher blobref.SeekFetcher) (*FileReader, os.Error) {
|
|
|
|
if ss.Type != "file" {
|
|
|
|
return nil, fmt.Errorf("schema/filereader: Superset not of type \"file\"")
|
|
|
|
}
|
|
|
|
return &FileReader{fetcher: fetcher, ss: ss, remain: int64(ss.Size)}, nil
|
2011-05-26 23:46:27 +00:00
|
|
|
}
|
|
|
|
|
2011-05-31 17:20:28 +00:00
|
|
|
// FileSchema returns the reader's schema superset. Don't mutate it.
|
|
|
|
func (fr *FileReader) FileSchema() *Superset {
|
|
|
|
return fr.ss
|
|
|
|
}
|
|
|
|
|
2011-06-10 01:28:07 +00:00
|
|
|
func (fr *FileReader) Close() os.Error {
|
|
|
|
if fr.ci == closedIndex {
|
|
|
|
return errClosed
|
|
|
|
}
|
|
|
|
fr.closeOpenBlobs()
|
|
|
|
fr.ci = closedIndex
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2011-06-06 20:30:54 +00:00
|
|
|
func (fr *FileReader) Skip(skipBytes uint64) uint64 {
|
2011-06-10 01:28:07 +00:00
|
|
|
if fr.ci == closedIndex {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2011-06-06 20:30:54 +00:00
|
|
|
wantedSkipped := skipBytes
|
|
|
|
|
2011-05-26 23:46:27 +00:00
|
|
|
for skipBytes != 0 && fr.ci < len(fr.ss.ContentParts) {
|
|
|
|
cp := fr.ss.ContentParts[fr.ci]
|
|
|
|
thisChunkSkippable := cp.Size - fr.ccon
|
|
|
|
toSkip := minu64(skipBytes, thisChunkSkippable)
|
|
|
|
fr.ccon += toSkip
|
2011-06-06 15:50:20 +00:00
|
|
|
fr.remain -= int64(toSkip)
|
2011-05-26 23:46:27 +00:00
|
|
|
if fr.ccon == cp.Size {
|
|
|
|
fr.ci++
|
|
|
|
fr.ccon = 0
|
|
|
|
}
|
|
|
|
skipBytes -= toSkip
|
|
|
|
}
|
2011-06-06 20:30:54 +00:00
|
|
|
|
|
|
|
return wantedSkipped - skipBytes
|
2011-05-26 23:46:27 +00:00
|
|
|
}
|
|
|
|
|
2011-05-31 17:35:45 +00:00
|
|
|
func (fr *FileReader) closeOpenBlobs() {
|
|
|
|
if fr.cr != nil {
|
|
|
|
fr.cr.Close()
|
|
|
|
fr.cr = nil
|
|
|
|
fr.crbr = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-06 20:30:54 +00:00
|
|
|
func (fr *FileReader) readerFor(br *blobref.BlobRef, seekTo int64) (r io.Reader, err os.Error) {
|
2011-05-31 17:35:45 +00:00
|
|
|
if fr.crbr == br {
|
|
|
|
return fr.cr, nil
|
|
|
|
}
|
|
|
|
fr.closeOpenBlobs()
|
2011-06-06 20:30:54 +00:00
|
|
|
var rsc blobref.ReadSeekCloser
|
2011-06-06 18:31:45 +00:00
|
|
|
if br != nil {
|
|
|
|
rsc, _, err = fr.fetcher.Fetch(br)
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
2011-06-06 20:30:54 +00:00
|
|
|
|
|
|
|
_, serr := rsc.Seek(int64(seekTo), os.SEEK_SET)
|
|
|
|
if serr != nil {
|
|
|
|
return nil, fmt.Errorf("schema: FileReader.Read seek error on blob %s: %v", br, serr)
|
|
|
|
}
|
|
|
|
|
2011-06-06 18:31:45 +00:00
|
|
|
} else {
|
|
|
|
rsc = &zeroReader{}
|
2011-05-31 17:35:45 +00:00
|
|
|
}
|
|
|
|
fr.crbr = br
|
|
|
|
fr.cr = rsc
|
|
|
|
return rsc, nil
|
|
|
|
}
|
|
|
|
|
2011-06-06 20:30:54 +00:00
|
|
|
func (fr *FileReader) subBlobRefReader(cp *ContentPart) (io.Reader, os.Error) {
|
|
|
|
if fr.ccp == cp {
|
|
|
|
return fr.csubfr, nil
|
|
|
|
}
|
2011-06-08 00:00:21 +00:00
|
|
|
subfr, err := NewFileReader(fr.fetcher, cp.SubBlobRef)
|
2011-06-06 20:30:54 +00:00
|
|
|
if err == nil {
|
|
|
|
subfr.Skip(cp.Offset)
|
|
|
|
fr.csubfr = subfr
|
|
|
|
fr.ccp = cp
|
|
|
|
}
|
|
|
|
return subfr, err
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fr *FileReader) currentPart() (*ContentPart, os.Error) {
|
2011-05-26 23:46:27 +00:00
|
|
|
for {
|
|
|
|
if fr.ci >= len(fr.ss.ContentParts) {
|
2011-05-31 17:35:45 +00:00
|
|
|
fr.closeOpenBlobs()
|
2011-06-06 15:50:20 +00:00
|
|
|
if fr.remain > 0 {
|
2011-06-06 20:30:54 +00:00
|
|
|
return nil, fmt.Errorf("schema: declared file schema size was larger than sum of content parts")
|
2011-06-06 15:50:20 +00:00
|
|
|
}
|
2011-06-06 20:30:54 +00:00
|
|
|
return nil, os.EOF
|
2011-05-26 23:46:27 +00:00
|
|
|
}
|
2011-06-06 20:30:54 +00:00
|
|
|
cp := fr.ss.ContentParts[fr.ci]
|
2011-05-26 23:46:27 +00:00
|
|
|
thisChunkReadable := cp.Size - fr.ccon
|
|
|
|
if thisChunkReadable == 0 {
|
|
|
|
fr.ci++
|
|
|
|
fr.ccon = 0
|
|
|
|
continue
|
|
|
|
}
|
2011-06-06 20:30:54 +00:00
|
|
|
return cp, nil
|
|
|
|
}
|
|
|
|
panic("unreachable")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read implements io.Reader, returning up to len(p) bytes from the
// current content part. A single call never crosses a content-part
// boundary, so callers may see short reads.
func (fr *FileReader) Read(p []byte) (n int, err os.Error) {
	if fr.ci == closedIndex {
		return 0, errClosed
	}

	// Find the part holding the next unread byte (skipping exhausted
	// parts); currentPart returns os.EOF once everything is consumed.
	cp, err := fr.currentPart()
	if err != nil {
		return 0, err
	}

	if cp.Size == 0 {
		return 0, fmt.Errorf("blobref content part contained illegal size 0")
	}

	br := cp.BlobRef
	sbr := cp.SubBlobRef
	// A part references either a raw chunk blob or a sub-file blob,
	// never both.
	if br != nil && sbr != nil {
		return 0, fmt.Errorf("content part index %d has both blobRef and subFileBlobRef", fr.ci)
	}

	var r io.Reader

	if sbr != nil {
		// Recursive case: the part's bytes come from another "file"
		// schema blob, read through a cached sub-FileReader.
		r, err = fr.subBlobRefReader(cp)
		if err != nil {
			return 0, fmt.Errorf("schema: FileReader.Read error fetching sub file %s: %v", sbr, err)
		}
	} else {
		// Chunk case: seek the (possibly cached) chunk reader to the
		// part's offset plus however much of it we've already consumed.
		// A nil br yields a zero-filled reader for sparse parts.
		seekTo := cp.Offset + fr.ccon
		r, err = fr.readerFor(br, int64(seekTo))
		if err != nil {
			return 0, fmt.Errorf("schema: FileReader.Read error fetching blob %s: %v", br, err)
		}
	}

	// Clamp the read so it never crosses into the next content part.
	readSize := cp.Size - fr.ccon
	if readSize < uint64(len(p)) {
		p = p[:int(readSize)]
	}

	n, err = r.Read(p)
	// Account for consumed bytes before surfacing any read error.
	fr.ccon += uint64(n)
	fr.remain -= int64(n)
	if fr.remain < 0 {
		err = fmt.Errorf("schema: file schema was invalid; content parts sum to over declared size")
	}
	return
}
|
|
|
|
|
|
|
|
// minu64 returns the smaller of two uint64 values.
func minu64(a, b uint64) uint64 {
	if b < a {
		return b
	}
	return a
}
|
2011-06-06 18:31:45 +00:00
|
|
|
|
2011-06-06 20:30:54 +00:00
|
|
|
type zeroReader struct{}
|
2011-06-06 18:31:45 +00:00
|
|
|
|
|
|
|
func (*zeroReader) Read(p []byte) (int, os.Error) {
|
|
|
|
for i := range p {
|
|
|
|
p[i] = 0
|
|
|
|
}
|
|
|
|
return len(p), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (*zeroReader) Close() os.Error {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (*zeroReader) Seek(offset int64, whence int) (newFilePos int64, err os.Error) {
|
|
|
|
// Caller is ignoring our newFilePos return value.
|
|
|
|
return 0, nil
|
|
|
|
}
|