/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package test

import (
	"crypto/sha1"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"testing"

	"camlistore.org/pkg/blob"
	"camlistore.org/pkg/blobserver"

	"go4.org/readerutil"
)

// Blob is a utility type for unit tests.
type Blob struct {
	Contents string // the contents of the blob
}

// Blob returns the test blob as a *blob.Blob whose contents are served
// from memory.
func (tb *Blob) Blob() *blob.Blob {
	s := tb.Contents
	return blob.NewBlob(tb.BlobRef(), tb.Size(), func() readerutil.ReadSeekCloser {
		return struct {
			io.ReadSeeker
			io.Closer
		}{
			io.NewSectionReader(strings.NewReader(s), 0, int64(len(s))),
			ioutil.NopCloser(nil),
		}
	})
}

// BlobRef returns the SHA-1 blob.Ref of the blob's contents.
func (tb *Blob) BlobRef() blob.Ref {
	h := sha1.New()
	h.Write([]byte(tb.Contents))
	return blob.RefFromHash(h)
}

// SizedRef returns the blob's ref and size as a blob.SizedRef.
func (tb *Blob) SizedRef() blob.SizedRef {
	return blob.SizedRef{Ref: tb.BlobRef(), Size: tb.Size()}
}

// BlobRefSlice returns the blob's ref as a single-element slice.
func (tb *Blob) BlobRefSlice() []blob.Ref {
	return []blob.Ref{tb.BlobRef()}
}

// Size returns the length of the blob's contents in bytes.
func (tb *Blob) Size() uint32 {
	// Check that it's not larger than a uint32 (possible with
	// 64-bit ints). But while we're here, be more paranoid and
	// check for over the default max blob size of 16 MB.
	if len(tb.Contents) > 16<<20 {
		panic(fmt.Sprintf("test blob of %d bytes is larger than max 16MB allowed in testing", len(tb.Contents)))
	}
	return uint32(len(tb.Contents))
}

// Reader returns a new io.Reader reading the blob's contents.
func (tb *Blob) Reader() io.Reader {
	return strings.NewReader(tb.Contents)
}

// AssertMatches fails t if sb's size or ref doesn't match the blob's.
func (tb *Blob) AssertMatches(t *testing.T, sb blob.SizedRef) {
	if sb.Size != tb.Size() {
		t.Fatalf("Got size %d; expected %d", sb.Size, tb.Size())
	}
	if sb.Ref != tb.BlobRef() {
		t.Fatalf("Got blob %q; expected %q", sb.Ref.String(), tb.BlobRef())
	}
}

// MustUpload uploads the blob to ds and fails t if the upload fails or
// the returned blob.SizedRef doesn't match the blob.
func (tb *Blob) MustUpload(t *testing.T, ds blobserver.BlobReceiver) {
	sb, err := ds.ReceiveBlob(tb.BlobRef(), tb.Reader())
	if err != nil {
		t.Fatalf("failed to upload blob %v (%q): %v", tb.BlobRef(), tb.Contents, err)
	}
	tb.AssertMatches(t, sb) // TODO: better error reporting
}
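
// mustUploadExample is a minimal, illustrative usage sketch showing how a
// storage test typically drives Blob. The blobserver.BlobReceiver argument
// is assumed to be whatever storage implementation the caller is testing;
// nothing here depends on a particular one.
func mustUploadExample(t *testing.T, sto blobserver.BlobReceiver) {
	tb := &Blob{Contents: "some test content"}
	tb.MustUpload(t, sto) // upload and verify the storage's reported SizedRef
	ref := tb.BlobRef()   // the sha1 ref the storage should now be able to serve
	t.Logf("uploaded %v (%d bytes)", ref, tb.Size())
}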