/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package schema

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strings"
	"sync"

	"camlistore.org/pkg/blob"
	"camlistore.org/pkg/singleflight"
	"camlistore.org/pkg/types"
)

const closedIndex = -1

var errClosed = errors.New("filereader is closed")

// A FileReader reads the bytes of "file" and "bytes" schema blobrefs.
type FileReader struct {
	// Immutable stuff:
	*io.SectionReader             // provides Read, etc.
	parent  *FileReader           // or nil; for sub-region readers, to find the ssm map in getSuperset
	rootOff int64                 // this FileReader's offset from the root
	fetcher blob.SeekFetcher
	ss      *superset
	size    int64 // total number of bytes

	sfg singleflight.Group // for loading blobrefs for ssm

	ssmmu sync.Mutex           // guards ssm
	ssm   map[string]*superset // blobref -> superset
}

// NewFileReader returns a new FileReader reading the contents of fileBlobRef,
// fetching blobs from fetcher. The fileBlobRef must be of a "bytes" or "file"
// schema blob.
//
// The caller should call Close on the FileReader when done reading.
func NewFileReader(fetcher blob.SeekFetcher, fileBlobRef blob.Ref) (*FileReader, error) {
	// TODO(bradfitz): make this take a blobref.FetcherAt instead?
	// TODO(bradfitz): rename this into bytes reader? but for now it's still
	// named FileReader, but can also read a "bytes" schema.
	if !fileBlobRef.Valid() {
		return nil, errors.New("schema/filereader: NewFileReader blobref invalid")
	}
	rsc, _, err := fetcher.Fetch(fileBlobRef)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err)
	}
	defer rsc.Close()
	ss, err := parseSuperset(rsc)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: decoding file schema blob: %v", err)
	}
	if ss.Type != "file" && ss.Type != "bytes" {
		return nil, fmt.Errorf("schema/filereader: expected \"file\" or \"bytes\" schema blob, got %q", ss.Type)
	}
	fr, err := ss.NewFileReader(fetcher)
	if err != nil {
		return nil, fmt.Errorf("schema/filereader: creating FileReader for %s: %v", fileBlobRef, err)
	}
	return fr, nil
}
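
// A minimal usage sketch (not part of this file's API; it assumes the caller
// already has a blob.SeekFetcher named fetcher and a blob.Ref named ref
// pointing at a "file" or "bytes" schema blob):
//
//	fr, err := schema.NewFileReader(fetcher, ref)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer fr.Close()
//	data, err := ioutil.ReadAll(fr) // fr embeds *io.SectionReader, so it is an io.Reader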

func (b *Blob) NewFileReader(fetcher blob.SeekFetcher) (*FileReader, error) {
	return b.ss.NewFileReader(fetcher)
}

// NewFileReader returns a new FileReader, reading bytes and blobs
// from the provided fetcher.
//
// NewFileReader does no fetch operation on the fetcher itself. The
// fetcher is only used in subsequent read operations.
//
// An error is only returned if the type of the superset is not either
// "file" or "bytes".
func (ss *superset) NewFileReader(fetcher blob.SeekFetcher) (*FileReader, error) {
	if ss.Type != "file" && ss.Type != "bytes" {
		return nil, fmt.Errorf("schema/filereader: Superset not of type \"file\" or \"bytes\"")
	}
	size := int64(ss.SumPartsSize())
	fr := &FileReader{
		fetcher: fetcher,
		ss:      ss,
		size:    size,
		ssm:     make(map[string]*superset),
	}
	fr.SectionReader = io.NewSectionReader(fr, 0, size)
	return fr, nil
}
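
// Note the self-reference above: fr.SectionReader is built on fr itself, so
// io.NewSectionReader layers Read, Seek, and Size on top of the chunk-aware
// ReadAt implementation defined below.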

// LoadAllChunks causes all chunks of the file to be loaded as quickly
// as possible. The contents are immediately discarded, so it is
// assumed that the fetcher is a caching fetcher.
func (fr *FileReader) LoadAllChunks() {
	offsetc := make(chan int64, 16)
	go func() {
		for off := range offsetc {
			go func(off int64) {
				rc, err := fr.readerForOffset(off)
				if err == nil {
					defer rc.Close()
					var b [1]byte
					rc.Read(b[:]) // fault in the blob
				}
			}(off)
		}
	}()
	go fr.GetChunkOffsets(offsetc)
}
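
// LoadAllChunks is purely a cache-warming pass: GetChunkOffsets streams chunk
// offsets into offsetc, and one goroutine per offset reads a single byte to
// fault the underlying blob into the (assumed caching) fetcher.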

// FileSchema returns the reader's schema superset. Don't mutate it.
func (fr *FileReader) FileSchema() *superset {
	return fr.ss
}

func (fr *FileReader) Close() error {
	// TODO: close cached blobs?
	return nil
}
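
// Compile-time check that *FileReader provides the promised methods; the
// blank-identifier assignment below has no runtime cost.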
var _ interface {
	io.ReaderAt
	io.Reader
	io.Closer
	Size() int64
} = (*FileReader)(nil)

func (fr *FileReader) ReadAt(p []byte, offset int64) (n int, err error) {
	if offset < 0 {
		return 0, errors.New("schema/filereader: negative offset")
	}
	if offset >= fr.Size() {
		return 0, io.EOF
	}
	want := len(p)
	for len(p) > 0 && err == nil {
		var rc io.ReadCloser
		rc, err = fr.readerForOffset(offset)
		if err != nil {
			return
		}
		var n1 int64 // never bigger than an int
		n1, err = io.CopyN(&sliceWriter{p}, rc, int64(len(p)))
		rc.Close()
		if err == io.EOF {
			err = nil
		}
		if n1 == 0 {
			break
		}
		p = p[n1:]
		offset += int64(n1)
		n += int(n1)
	}
	if n < want && err == nil {
		err = io.ErrUnexpectedEOF
	}
	return n, err
}
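
// ReadAt stitches reads together across chunk boundaries, so callers can read
// any byte range directly. A sketch (buffer size and offset are arbitrary
// example values):
//
//	buf := make([]byte, 4096)
//	n, err := fr.ReadAt(buf, 1<<20) // 4 KiB starting at offset 1 MiB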

// GetChunkOffsets sends c each of the file's chunk offsets.
// The offsets are not necessarily sent in order, and not all ranges of
// the file are necessarily represented if the file contains zero holes.
// The channel c is closed before the function returns, regardless of error.
func (fr *FileReader) GetChunkOffsets(c chan<- int64) error {
	defer close(c)
	return fr.sendPartsChunks(c, 0, fr.ss.Parts)
}
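
// A sketch of draining the offsets (the channel must be consumed, since
// sends block once its buffer fills):
//
//	c := make(chan int64, 16)
//	go func() {
//		for off := range c {
//			fmt.Println("chunk at", off)
//		}
//	}()
//	err := fr.GetChunkOffsets(c)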

func (fr *FileReader) sendPartsChunks(c chan<- int64, off int64, parts []*BytesPart) error {
	var errcs []chan error
	for _, p := range parts {
		switch {
		case p.BlobRef.Valid() && p.BytesRef.Valid():
			return fmt.Errorf("part illegally contained both a blobRef and bytesRef")
		case !p.BlobRef.Valid() && !p.BytesRef.Valid():
			// Don't send
		case p.BlobRef.Valid():
			c <- off
		case p.BytesRef.Valid():
			errc := make(chan error, 1)
			errcs = append(errcs, errc)
			br := p.BytesRef
			offNow := off
			go func() {
				ss, err := fr.getSuperset(br)
				if err != nil {
					errc <- err
					return
				}
				errc <- fr.sendPartsChunks(c, offNow, ss.Parts)
			}()
		}
		off += int64(p.Size)
	}
	for _, errc := range errcs {
		if err := <-errc; err != nil {
			return err
		}
	}
	return nil
}

type sliceWriter struct {
	dst []byte
}

func (sw *sliceWriter) Write(p []byte) (n int, err error) {
	n = copy(sw.dst, p)
	sw.dst = sw.dst[n:]
	return n, nil
}
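
// sliceWriter lets ReadAt use io.CopyN to fill a caller's []byte in place:
// each Write copies into the front of dst and then advances it, so successive
// writes land back to back. E.g. writing "ab" then "cd" into a 4-byte dst
// yields "abcd" with dst left empty.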

var eofReader io.ReadCloser = ioutil.NopCloser(strings.NewReader(""))

func (fr *FileReader) rootReader() *FileReader {
	if fr.parent != nil {
		return fr.parent.rootReader()
	}
	return fr
}

func (fr *FileReader) getSuperset(br blob.Ref) (*superset, error) {
	if root := fr.rootReader(); root != fr {
		return root.getSuperset(br)
	}
	brStr := br.String()
	ssi, err := fr.sfg.Do(brStr, func() (interface{}, error) {
		fr.ssmmu.Lock()
		ss, ok := fr.ssm[brStr]
		fr.ssmmu.Unlock()
		if ok {
			return ss, nil
		}
		rsc, _, err := fr.fetcher.Fetch(br)
		if err != nil {
			return nil, fmt.Errorf("schema/filereader: fetching file schema blob: %v", err)
		}
		defer rsc.Close()
		ss, err = parseSuperset(rsc)
		if err != nil {
			return nil, err
		}
		fr.ssmmu.Lock()
		defer fr.ssmmu.Unlock()
		fr.ssm[brStr] = ss
		return ss, nil
	})
	if err != nil {
		return nil, err
	}
	return ssi.(*superset), nil
}

var debug = os.Getenv("CAMLI_DEBUG") != ""

// readerForOffset returns a ReadCloser that reads some number of bytes and then EOF
// from the provided offset. Seeing EOF doesn't mean the end of the whole file; just the
// chunk at that offset. The caller must close the ReadCloser when done reading.
func (fr *FileReader) readerForOffset(off int64) (io.ReadCloser, error) {
	if debug {
		log.Printf("(%p) readerForOffset %d + %d = %d", fr, fr.rootOff, off, fr.rootOff+off)
	}
	if off < 0 {
		panic("negative offset")
	}
	if off >= fr.size {
		return eofReader, nil
	}
	offRemain := off
	var skipped int64
	parts := fr.ss.Parts
	for len(parts) > 0 && parts[0].Size <= uint64(offRemain) {
		offRemain -= int64(parts[0].Size)
		skipped += int64(parts[0].Size)
		parts = parts[1:]
	}
	if len(parts) == 0 {
		return eofReader, nil
	}
	p0 := parts[0]
	var rsc types.ReadSeekCloser
	var err error
	switch {
	case p0.BlobRef.Valid() && p0.BytesRef.Valid():
		return nil, fmt.Errorf("part illegally contained both a blobRef and bytesRef")
	case !p0.BlobRef.Valid() && !p0.BytesRef.Valid():
		return &nZeros{int(p0.Size - uint64(offRemain))}, nil
	case p0.BlobRef.Valid():
		rsc, _, err = fr.fetcher.Fetch(p0.BlobRef)
	case p0.BytesRef.Valid():
		var ss *superset
		ss, err = fr.getSuperset(p0.BytesRef)
		if err != nil {
			return nil, err
		}
		rsc, err = ss.NewFileReader(fr.fetcher)
		if err == nil {
			subFR := rsc.(*FileReader)
			subFR.parent = fr.rootReader()
			subFR.rootOff = fr.rootOff + skipped
		}
	}
	if err != nil {
		return nil, err
	}
	offRemain += int64(p0.Offset)
	if offRemain > 0 {
		newPos, err := rsc.Seek(offRemain, os.SEEK_SET)
		if err != nil {
			return nil, err
		}
		if newPos != offRemain {
			panic("Seek didn't work")
		}
	}
	return struct {
		io.Reader
		io.Closer
	}{
		io.LimitReader(rsc, int64(p0.Size)),
		rsc,
	}, nil
}
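
// A worked example of the chunk walk above (hypothetical sizes): for parts of
// sizes [10, 20, 30] and off=15, the loop skips the 10-byte part (skipped=10,
// offRemain=5), stops at the 20-byte part, and the returned reader is that
// part's blob seeked to offRemain+p0.Offset, limited to p0.Size bytes.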

// nZeros is a ReadCloser that reads remain zeros before EOF.
type nZeros struct {
	remain int
}

func (z *nZeros) Read(p []byte) (n int, err error) {
	for len(p) > 0 && z.remain > 0 {
		p[0] = 0
		p = p[1:] // advance the destination; without this the loop never makes progress
		n++
		z.remain--
	}
	if n == 0 && z.remain == 0 {
		err = io.EOF
	}
	return
}

func (*nZeros) Close() error { return nil }
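
// nZeros backs "zero hole" parts that have neither a blobRef nor a bytesRef:
// sparse regions of a file read as zeros without any blob being stored or
// fetched.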