/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package schema

import (
	"bufio"
	"bytes"
	"crypto"
	"fmt"
	"io"
	"log"
	"strings"

	"camlistore.org/pkg/blobref"
	"camlistore.org/pkg/blobserver"
	"camlistore.org/pkg/rollsum"
)

const (
	// maxBlobSize is the largest blob we ever make when cutting up
	// a file.
	maxBlobSize = 1 << 20

	// firstChunkSize is the ideal size of the first chunk of a
	// file. It's kept smaller for the file(1) command, which
	// likes to read 96 kB on Linux and 256 kB on OS X. Related
	// are tools which extract the EXIF metadata from JPEGs,
	// ID3 from mp3s, etc. Nautilus, OS X Finder, etc.
	// The first chunk may be larger than this if cutting the file
	// here would create a small subsequent chunk (e.g. a file one
	// byte larger than firstChunkSize).
	firstChunkSize = 256 << 10

	// bufioReaderSize is an explicit size for our bufio.Reader,
	// so we don't rely on NewReader's implicit size.
	// We care about the buffer size because it affects how far
	// in advance we can detect EOF from an io.Reader that doesn't
	// know its size. Detecting an EOF bufioReaderSize bytes early
	// means we can plan for the final chunk.
	bufioReaderSize = 32 << 10

	// tooSmallThreshold is the threshold at which rolling checksum
	// boundaries are ignored if the current chunk being built is
	// smaller than this.
	tooSmallThreshold = 64 << 10
)
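
// Taken together (see writeFileChunks below): the first chunk is cut
// at firstChunkSize, later chunks are cut where the rolling checksum
// fires once at least tooSmallThreshold bytes have accumulated, a
// chunk reaching maxBlobSize is cut unconditionally, and otherwise no
// cut is made once EOF is known to be near.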

var _ = log.Printf

// WriteFileFromReader creates and uploads a "file" JSON schema
// composed of chunks of r, also uploading the chunks. The returned
// BlobRef is of the JSON file schema blob.
func WriteFileFromReader(bs blobserver.StatReceiver, filename string, r io.Reader) (*blobref.BlobRef, error) {
	m := NewFileMap(filename)
	return WriteFileMap(bs, m, r)
}
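
// A minimal usage sketch (hypothetical names: bs is any configured
// blobserver.StatReceiver, f is an open *os.File):
//
//	br, err := schema.WriteFileFromReader(bs, "photo.jpg", f)
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Printf("file schema blob: %s", br)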

// WriteFileMap uploads chunks of r to bs while populating fileMap and
// finally uploading fileMap. The returned blobref is of fileMap's
// JSON blob.
func WriteFileMap(bs blobserver.StatReceiver, fileMap Map, r io.Reader) (*blobref.BlobRef, error) {
	return writeFileMapRolling(bs, fileMap, r)
}

// This is the simple 1MB chunk version. The rolling checksum version is below.
func writeFileMapOld(bs blobserver.StatReceiver, fileMap Map, r io.Reader) (*blobref.BlobRef, error) {
	parts, size := []BytesPart{}, int64(0)

	var buf bytes.Buffer
	for {
		buf.Reset()
		n, err := io.Copy(&buf, io.LimitReader(r, maxBlobSize))
		if err != nil {
			return nil, err
		}
		if n == 0 {
			break
		}

		hash := crypto.SHA1.New()
		io.Copy(hash, bytes.NewBuffer(buf.Bytes()))
		br := blobref.FromHash("sha1", hash)
		hasBlob, err := serverHasBlob(bs, br)
		if err != nil {
			return nil, err
		}
		if !hasBlob {
			sb, err := bs.ReceiveBlob(br, &buf)
			if err != nil {
				return nil, err
			}
			if expect := (blobref.SizedBlobRef{br, n}); !expect.Equal(sb) {
				return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
			}
		}

		size += n
		parts = append(parts, BytesPart{
			BlobRef: br,
			Size:    uint64(n),
			Offset:  0, // into BlobRef to read from (not of dest)
		})
	}

	err := PopulateParts(fileMap, size, parts)
	if err != nil {
		return nil, err
	}

	json, err := fileMap.JSON()
	if err != nil {
		return nil, err
	}
	br := blobref.SHA1FromString(json)
	sb, err := bs.ReceiveBlob(br, strings.NewReader(json))
	if err != nil {
		return nil, err
	}
	if expect := (blobref.SizedBlobRef{br, int64(len(json))}); !expect.Equal(sb) {
		return nil, fmt.Errorf("schema/filewriter: wrote %s bytes, got %s ack'd", expect, sb)
	}

	return br, nil
}
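
// For illustration, the "file" schema blob uploaded above looks
// roughly like this (a sketch with hypothetical blobrefs and sizes):
//
//	{"camliVersion": 1,
//	 "camliType": "file",
//	 "fileName": "photo.jpg",
//	 "parts": [
//	   {"blobRef": "sha1-xxx", "size": 1048576},
//	   {"blobRef": "sha1-yyy", "size": 202340}
//	 ]}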

// serverHasBlob reports whether bs already has the blob br, using a
// single StatBlobs call.
func serverHasBlob(bs blobserver.BlobStatter, br *blobref.BlobRef) (have bool, err error) {
	ch := make(chan blobref.SizedBlobRef, 1)
	go func() {
		err = bs.StatBlobs(ch, []*blobref.BlobRef{br}, 0)
		close(ch)
	}()
	for _ = range ch {
		have = true
	}
	return
}

// span is one node of the chunk tree: the bytes [from, to) of the
// input are stored in the blob br, and children holds any earlier,
// lower-scoring spans grouped beneath this node.
type span struct {
	from, to int64
	bits     int
	br       *blobref.BlobRef
	children []span
}

func (s *span) isSingleBlob() bool {
	return len(s.children) == 0
}

func (s *span) size() int64 {
	size := s.to - s.from
	for _, cs := range s.children {
		size += cs.size()
	}
	return size
}

// noteEOFReader keeps track of when it's seen EOF, but otherwise
// delegates entirely to r.
type noteEOFReader struct {
	r      io.Reader
	sawEOF bool
}

func (r *noteEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.r.Read(p)
	if err == io.EOF {
		r.sawEOF = true
	}
	return
}
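
// writeFileChunks wraps a noteEOFReader under a bufio.Reader of
// bufioReaderSize bytes, so sawEOF flips up to bufioReaderSize bytes
// before the consumer reaches the end of the data: enough warning to
// avoid cutting a needlessly small final chunk.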

// uploadString uploads the string s as a blob to bs, unless bs
// already has it, and returns its blobref.
func uploadString(bs blobserver.StatReceiver, s string) (*blobref.BlobRef, error) {
	br := blobref.SHA1FromString(s)
	hasIt, err := serverHasBlob(bs, br)
	if err != nil {
		return nil, err
	}
	if hasIt {
		return br, nil
	}
	_, err = bs.ReceiveBlob(br, strings.NewReader(s))
	if err != nil {
		return nil, err
	}
	return br, nil
}

// uploadBytes gets a map from mapSource (of type either "bytes" or
// "file", which is a superset of "bytes"), sets it to the provided
// size, and populates it with the provided spans. The resulting bytes
// or file schema blob is uploaded and its blobref is returned.
func uploadBytes(bs blobserver.StatReceiver, mapSource func() Map, size int64, s []span) (*blobref.BlobRef, error) {
	parts := []BytesPart{}
	err := addBytesParts(bs, &parts, s)
	if err != nil {
		return nil, err
	}
	m := mapSource()
	err = PopulateParts(m, size, parts)
	if err != nil {
		return nil, err
	}
	json, err := m.JSON()
	if err != nil {
		return nil, err
	}
	return uploadString(bs, json)
}
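
// mapSource is a constructor rather than a Map value so that the
// recursive calls made from addBytesParts can mint fresh "bytes" maps
// via newBytes, while the top-level call passes a closure returning
// the caller's existing file map.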

// addBytesParts uploads the provided spans to bs, appending elements to *dst.
func addBytesParts(bs blobserver.StatReceiver, dst *[]BytesPart, spans []span) error {
	for _, sp := range spans {
		if len(sp.children) == 1 && sp.children[0].isSingleBlob() {
			// Remove an occasional useless indirection of
			// what would become a bytes schema blob
			// pointing to a single blobref. Just promote
			// the blobref child instead.
			child := sp.children[0]
			*dst = append(*dst, BytesPart{
				BlobRef: child.br,
				Size:    uint64(child.size()),
			})
			sp.children = nil
		}
		if len(sp.children) > 0 {
			childrenSize := int64(0)
			for _, cs := range sp.children {
				childrenSize += cs.size()
			}
			br, err := uploadBytes(bs, newBytes, childrenSize, sp.children)
			if err != nil {
				return err
			}
			*dst = append(*dst, BytesPart{
				BytesRef: br,
				Size:     uint64(childrenSize),
			})
		}
		if sp.from == sp.to {
			panic("Shouldn't happen. " + fmt.Sprintf("weird span with same from & to: %#v", sp))
		}
		*dst = append(*dst, BytesPart{
			BlobRef: sp.br,
			Size:    uint64(sp.to - sp.from),
		})
	}
	return nil
}
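
// Illustration (hypothetical sizes): a span whose children were
// rolled up into a "bytes" schema blob, followed by the span's own
// chunk, flattens into two parts:
//
//	BytesPart{BytesRef: <bytes schema blobref>, Size: <children total>}
//	BytesPart{BlobRef: <chunk blobref>, Size: <sp.to - sp.from>}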

// writeFileMapRolling uploads chunks of r to bs while populating
// fileMap and finally uploading fileMap. The returned blobref is of
// fileMap's JSON blob. It uses a rolling checksum to pick the chunk
// boundaries.
func writeFileMapRolling(bs blobserver.StatReceiver, fileMap Map, r io.Reader) (outbr *blobref.BlobRef, outerr error) {
	rootFile := func() Map { return fileMap }
	n, spans, err := writeFileChunks(bs, fileMap, r)
	if err != nil {
		return nil, err
	}
	// The top-level content parts
	return uploadBytes(bs, rootFile, n, spans)
}

// WriteFileChunks uploads chunks of r to bs while populating fileMap.
// It does not upload fileMap.
func WriteFileChunks(bs blobserver.StatReceiver, fileMap Map, r io.Reader) error {
	rootFile := func() Map { return fileMap }

	n, spans, err := writeFileChunks(bs, fileMap, r)
	if err != nil {
		return err
	}
	topLevel := func(mapSource func() Map, size int64, s []span) error {
		parts := []BytesPart{}
		err := addBytesParts(bs, &parts, s)
		if err != nil {
			return err
		}
		m := mapSource()
		return PopulateParts(m, size, parts)
	}

	// The top-level content parts
	return topLevel(rootFile, n, spans)
}

// writeFileChunks reads r until EOF, cutting it into chunks on
// rolling checksum boundaries and uploading each chunk to bs as it
// goes. It returns the total number of bytes read and the resulting
// tree of spans describing the chunks.
func writeFileChunks(bs blobserver.StatReceiver, fileMap Map, r io.Reader) (n int64, spans []span, outerr error) {
	src := &noteEOFReader{r: r}
	blobSize := 0 // of the next blob being built, should be same as buf.Len()
	bufr := bufio.NewReaderSize(src, bufioReaderSize)
	spans = []span{} // the tree of spans, cut on interesting rollsum boundaries
	rs := rollsum.New()
	last := n
	buf := new(bytes.Buffer)

	// TODO: keep multiple of these in-flight at a time.
	uploadLastSpan := func() bool {
		defer buf.Reset()
		br, err := uploadString(bs, buf.String())
		if err != nil {
			outerr = err
			return false
		}
		spans[len(spans)-1].br = br
		return true
	}

	for {
		c, err := bufr.ReadByte()
		if err == io.EOF {
			if n != last {
				spans = append(spans, span{from: last, to: n})
				if !uploadLastSpan() {
					return
				}
			}
			break
		}
		if err != nil {
			return 0, nil, err
		}

		buf.WriteByte(c)
		n++
		blobSize++
		rs.Roll(c)

		var bits int
		onRollSplit := rs.OnSplit()
		switch {
		case blobSize == maxBlobSize:
			bits = 20 // arbitrary node weight; 1<<20 == 1MB
		case src.sawEOF:
			// Don't split. End is coming soon enough.
			continue
		case onRollSplit && n > firstChunkSize && blobSize > tooSmallThreshold:
			bits = rs.Bits()
		case n == firstChunkSize:
			bits = 18 // 1 << 18 == 256KB
		default:
			// Don't split.
			continue
		}
		blobSize = 0

		// Take any spans from the end of the spans slice that
		// have a smaller 'bits' score and make them children
		// of this node.
		var children []span
		childrenFrom := len(spans)
		for childrenFrom > 0 && spans[childrenFrom-1].bits < bits {
			childrenFrom--
		}
		if nCopy := len(spans) - childrenFrom; nCopy > 0 {
			children = make([]span, nCopy)
			copy(children, spans[childrenFrom:])
			spans = spans[:childrenFrom]
		}

		spans = append(spans, span{from: last, to: n, bits: bits, children: children})
		last = n
		if !uploadLastSpan() {
			return
		}
	}

	return n, spans, nil
}
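
// Example of the span rollup above (hypothetical bits scores): if the
// spans built so far scored 14 and 13, and the next boundary scores
// 16, both earlier spans become children of the new span, so stronger
// (higher-bits) boundaries sit higher in the finished tree.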