/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package index
import (
	"bytes"
	"crypto/sha1"
	"errors"
	"fmt"
	_ "image/gif"
	_ "image/jpeg"
	_ "image/png"
	"io"
	"log"
	"sort"
	"strings"
	"sync"
	"time"

	"camlistore.org/pkg/blob"
	"camlistore.org/pkg/blobserver"
	"camlistore.org/pkg/images"
	"camlistore.org/pkg/jsonsign"
	"camlistore.org/pkg/magic"
	"camlistore.org/pkg/schema"
	"camlistore.org/pkg/types"
	"camlistore.org/pkg/types/camtypes"

	"camlistore.org/third_party/taglib"
)

var reindexMu sync.Mutex

func (ix *Index) reindex(br blob.Ref) {
	// TODO: cap how many of these can be going at once, probably more than 1,
	// and be more efficient than just blocking goroutines. For now, this:
	reindexMu.Lock()
	defer reindexMu.Unlock()

	bs := ix.BlobSource
	if bs == nil {
		log.Printf("index: can't re-index %v: no BlobSource", br)
		return
	}
	log.Printf("index: starting re-index of %v", br)
	rc, _, err := bs.FetchStreaming(br)
	if err != nil {
		log.Printf("index: failed to fetch %v for reindexing: %v", br, err)
		return
	}
	defer rc.Close()
	sb, err := blobserver.Receive(ix, br, rc)
	if err != nil {
		log.Printf("index: reindex of %v failed: %v", br, err)
		return
	}
	log.Printf("index: successfully reindexed %v", sb)
}

// mutationMap holds the index mutations (key/value pairs) staged for a
// single received blob, to be committed in one batch.
type mutationMap map[string]string

func (mm mutationMap) Set(k, v string) {
	mm[k] = v
}
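
// Illustrative sketch only (the blobref and size here are hypothetical):
// receiving a 123-byte schema blob stages rows shaped like
//
//	mm := mutationMap{}
//	mm.Set("have:sha1-0000000000000000000000000000000000000000", "123")
//	mm.Set("meta:sha1-0000000000000000000000000000000000000000", "123|application/json; camliType=file")
//
// which commit later writes as one batch.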

// ReceiveBlob implements blobserver.BlobReceiver: it sniffs the incoming
// blob, stages index mutations for it, commits them in one batch, and
// records the blob in the in-memory corpus, if one is attached.
func (ix *Index) ReceiveBlob(blobRef blob.Ref, source io.Reader) (retsb blob.SizedRef, err error) {
	sniffer := NewBlobSniffer(blobRef)
	written, err := io.Copy(sniffer, source)
	if err != nil {
		return
	}

	sniffer.Parse()

	mm, err := ix.populateMutationMap(blobRef, sniffer)
	if err != nil {
		return
	}

	if err := ix.commit(mm); err != nil {
		return retsb, err
	}

	if c := ix.corpus; c != nil {
		c.mu.Lock()
		defer c.mu.Unlock()
		c.blobs[blobRef] = camtypes.BlobMeta{
			Ref:       blobRef,
			Size:      int(written),
			CamliType: c.strLocked(sniffer.CamliType()),
		}
	}

	// TODO(bradfitz): log levels? These are generally noisy
	// (especially in tests, like search/handler_test), but I
	// could see it being useful in production. For now, disabled:
	//
	// mimeType := sniffer.MIMEType()
	// log.Printf("indexer: received %s; type=%v; truncated=%v", blobRef, mimeType, sniffer.IsTruncated())

	return blob.SizedRef{Ref: blobRef, Size: written}, nil
}

// commit writes the contents of the mutationMap to a batch
// mutation and commits that batch.
func (ix *Index) commit(mm mutationMap) error {
	bm := ix.s.BeginBatch()
	for k, v := range mm {
		bm.Set(k, v)
	}
	return ix.s.CommitBatch(bm)
}
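
// Because every row for a blob lands in a single batch, a reader never
// observes a partially indexed blob — assuming the underlying storage's
// CommitBatch applies batches atomically.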

// populateMutationMap populates the keys & values to be committed
// into the returned mutationMap.
//
// The blobref can be trusted at this point (it's been fully consumed
// and verified to match), and the sniffer has been populated.
func (ix *Index) populateMutationMap(br blob.Ref, sniffer *BlobSniffer) (mutationMap, error) {
	mm := mutationMap{
		"have:" + br.String(): fmt.Sprintf("%d", sniffer.Size()),
		"meta:" + br.String(): fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MIMEType()),
	}

	if blob, ok := sniffer.SchemaBlob(); ok {
		switch blob.Type() {
		case "claim":
			if err := ix.populateClaim(blob, mm); err != nil {
				return nil, err
			}
		case "permanode":
			//if err := mi.populatePermanode(blobRef, camli, mm); err != nil {
			//	return nil, err
			//}
		case "file":
			if err := ix.populateFile(blob, mm); err != nil {
				return nil, err
			}
		case "directory":
			if err := ix.populateDir(blob, mm); err != nil {
				return nil, err
			}
		}
	}
	return mm, nil
}

// keepFirstN keeps the first N bytes written to it in Bytes.
type keepFirstN struct {
	N     int
	Bytes []byte
}

func (w *keepFirstN) Write(p []byte) (n int, err error) {
	if n := w.N - len(w.Bytes); n > 0 {
		if n > len(p) {
			n = len(p)
		}
		w.Bytes = append(w.Bytes, p[:n]...)
	}
	return len(p), nil
}
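
// Usage sketch: Write retains at most the first N bytes but always
// reports the full length, so an io.MultiWriter feeding it is never
// short-circuited. For example:
//
//	buf := &keepFirstN{N: 4}
//	n, _ := io.Copy(buf, strings.NewReader("abcdefg"))
//	// n == 7, but buf.Bytes == []byte("abcd"); the tail is discarded.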

// populateFile stages mutations for a "file" schema blob.
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(b *schema.Blob, mm mutationMap) error {
	var times []time.Time // all creation or mod times seen; may be zero
	times = append(times, b.ModTime())

	blobRef := b.BlobRef()
	seekFetcher := blob.SeekerFromStreamingFetcher(ix.BlobSource)
	fr, err := b.NewFileReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		// Basically the same as the TODO just below.
		log.Printf("index: error indexing file, creating NewFileReader %s: %v", blobRef, err)
		return nil
	}
	defer fr.Close()
	mime, reader := magic.MIMETypeFromReader(fr)

	sha1 := sha1.New()
	var copyDest io.Writer = sha1
	var imageBuf *keepFirstN // or nil
	if strings.HasPrefix(mime, "image/") {
		imageBuf = &keepFirstN{N: 256 << 10}
		copyDest = io.MultiWriter(copyDest, imageBuf)
	}
	size, err := io.Copy(copyDest, reader)
	if err != nil {
		// TODO: job scheduling system to retry this spaced
		// out max n times. Right now our options are
		// ignoring this error (forever) or returning the
		// error and making the indexing try again (likely
		// forever failing). Both options suck. For now just
		// log and act like all's okay.
		log.Printf("index: error indexing file %s: %v", blobRef, err)
		return nil
	}

	if imageBuf != nil {
		if conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes)); err == nil {
			mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
		}
		if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
			times = append(times, ft)
		} else {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
		}
	}

	var sortTimes []time.Time
	for _, t := range times {
		if !t.IsZero() {
			sortTimes = append(sortTimes, t)
		}
	}
	sort.Sort(types.ByTime(sortTimes))
	var time3339s string
	switch {
	case len(sortTimes) == 1:
		time3339s = types.Time3339(sortTimes[0]).String()
	case len(sortTimes) >= 2:
		oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
		time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
	}

	wholeRef := blob.RefFromHash(sha1)
	mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime))
	mm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))

	if strings.HasPrefix(mime, "audio/") {
		tag, err := taglib.Decode(fr, fr.Size())
		if err == nil {
			indexMusic(tag, wholeRef, mm)
		} else {
			log.Print("index: error parsing tag: ", err)
		}
	}

	return nil
}
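
// Design note: the single io.Copy above feeds both the whole-file SHA-1
// and, for images, the bounded imageBuf through io.MultiWriter, so the
// file's bytes are read from the blobserver only once.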

// indexMusic adds mutations to index the wholeRef by most of the
// fields in taglib.GenericTag.
func indexMusic(tag taglib.GenericTag, wholeRef blob.Ref, mm mutationMap) {
	const justYearLayout = "2006"

	var yearStr, trackStr string
	if !tag.Year().IsZero() {
		yearStr = tag.Year().Format(justYearLayout)
	}
	if tag.Track() != 0 {
		trackStr = fmt.Sprintf("%d", tag.Track())
	}

	tags := map[string]string{
		"title":  tag.Title(),
		"artist": tag.Artist(),
		"album":  tag.Album(),
		"genre":  tag.Genre(),
		"year":   yearStr,
		"track":  trackStr,
	}

	for tag, value := range tags {
		if value != "" {
			mm.Set(keyAudioTag.Key(tag, strings.ToLower(value), wholeRef), "1")
		}
	}
}
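
// For a hypothetical tag with title "Song" and artist "Band", this
// stages rows like keyAudioTag.Key("title", "song", wholeRef) => "1",
// one per non-empty field; values are lowercased so lookups can be
// case-insensitive.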

// populateDir stages mutations for a "directory" schema blob.
// b: the parsed directory schema blob
// mm: keys to populate
func (ix *Index) populateDir(b *schema.Blob, mm mutationMap) error {
	blobRef := b.BlobRef()
	// TODO(bradfitz): move the NewDirReader and FileName method off *schema.Blob and onto

	seekFetcher := blob.SeekerFromStreamingFetcher(ix.BlobSource)
	dr, err := b.NewDirReader(seekFetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		log.Printf("index: error indexing directory, creating NewDirReader %s: %v", blobRef, err)
		return nil
	}
	sts, err := dr.StaticSet()
	if err != nil {
		log.Printf("index: error indexing directory: can't get StaticSet: %v\n", err)
		return nil
	}

	mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(len(sts), b.FileName(), ""))
	for _, br := range sts {
		mm.Set(keyStaticDirChild.Key(blobRef, br.String()), "1")
	}
	return nil
}

// populateClaim stages mutations for a "claim" schema blob, after
// verifying its signature.
func (ix *Index) populateClaim(b *schema.Blob, mm mutationMap) error {
	br := b.BlobRef()

	claim, ok := b.AsClaim()
	if !ok {
		// Skip bogus claim with malformed permanode.
		return nil
	}

	pnbr := claim.ModifiedPermanode()
	if !pnbr.Valid() {
		// A different type of claim; not modifying a permanode.
		return nil
	}
	attr, value := claim.Attribute(), claim.Value()

	vr := jsonsign.NewVerificationRequest(b.JSON(), ix.KeyFetcher)
	if !vr.Verify() {
		// TODO(bradfitz): ask if the vr.Err.(jsonsign.Error).IsPermanent() and retry
		// later if it's not permanent? or maybe do this up a level?
		if vr.Err != nil {
			return vr.Err
		}
		return errors.New("index: populateClaim verification failure")
	}
	verifiedKeyId := vr.SignerKeyId

	mm.Set("signerkeyid:"+vr.CamliSigner.String(), verifiedKeyId)

	recentKey := keyRecentPermanode.Key(verifiedKeyId, claim.ClaimDateString(), br)
	mm.Set(recentKey, pnbr.String())

	claimKey := keyPermanodeClaim.Key(pnbr, verifiedKeyId, claim.ClaimDateString(), br)
	mm.Set(claimKey, keyPermanodeClaim.Val(claim.ClaimType(), attr, value))

	if strings.HasPrefix(attr, "camliPath:") {
		targetRef, ok := blob.Parse(value)
		if ok {
			// TODO: deal with set-attribute vs. del-attribute
			// properly? I think we get it for free when
			// del-attribute has no Value, but we need to deal
			// with the case where they explicitly delete the
			// current value.
			suffix := attr[len("camliPath:"):]
			active := "Y"
			if claim.ClaimType() == "del-attribute" {
				active = "N"
			}
			baseRef := pnbr
			claimRef := br

			key := keyPathBackward.Key(verifiedKeyId, targetRef, claimRef)
			val := keyPathBackward.Val(claim.ClaimDateString(), baseRef, active, suffix)
			mm.Set(key, val)

			key = keyPathForward.Key(verifiedKeyId, baseRef, suffix, claim.ClaimDateString(), claimRef)
			val = keyPathForward.Val(active, targetRef)
			mm.Set(key, val)
		}
	}

	if IsIndexedAttribute(attr) {
		key := keySignerAttrValue.Key(verifiedKeyId, attr, value, claim.ClaimDateString(), br)
		mm.Set(key, keySignerAttrValue.Val(pnbr))
	}

	if IsBlobReferenceAttribute(attr) {
		targetRef, ok := blob.Parse(value)
		if ok {
			key := keyEdgeBackward.Key(targetRef, pnbr, br)
			mm.Set(key, keyEdgeBackward.Val("permanode", ""))
		}
	}

	return nil
}

// pipes returns args separated by pipes.
func pipes(args ...interface{}) string {
	var buf bytes.Buffer
	for n, arg := range args {
		if n > 0 {
			buf.WriteString("|")
		}
		if s, ok := arg.(string); ok {
			buf.WriteString(s)
		} else {
			buf.WriteString(arg.(fmt.Stringer).String())
		}
	}
	return buf.String()
}
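
// For example, with a hypothetical blob.Ref br, pipes("100", br) returns
// "100|sha1-..." because blob.Ref satisfies fmt.Stringer; arguments that
// are neither strings nor Stringers panic on the type assertion.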