/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package index

import (
	"bytes"
	"crypto/sha1"
	"errors"
	"fmt"
	_ "image/gif"
	_ "image/jpeg"
	_ "image/png"
	"io"
	"log"
	"os"
	"sort"
	"strings"
	"sync"
	"time"

	"camlistore.org/pkg/blob"
	"camlistore.org/pkg/blobserver"
	"camlistore.org/pkg/images"
	"camlistore.org/pkg/jsonsign"
	"camlistore.org/pkg/magic"
	"camlistore.org/pkg/media"
	"camlistore.org/pkg/schema"
	"camlistore.org/pkg/types"

	"camlistore.org/third_party/github.com/camlistore/goexif/exif"
	"camlistore.org/third_party/github.com/camlistore/goexif/tiff"
	"camlistore.org/third_party/github.com/hjfreyer/taglib-go/taglib"
)
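
// outOfOrderIndexerLoop waits to be tickled on ix.tickleOoo and then
// drains ix.readyReindex, reindexing blobs whose previously missing
// dependencies have since arrived.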
func (ix *Index) outOfOrderIndexerLoop() {
	if ix.BlobSource == nil {
		// Bail early. Nothing will work later anyway. (for tests)
		return
	}
WaitTickle:
	for _ = range ix.tickleOoo {
		for {
			ix.mu.Lock()
			if len(ix.readyReindex) == 0 {
				ix.mu.Unlock()
				continue WaitTickle
			}
			var br blob.Ref
			for br = range ix.readyReindex {
				break
			}
			delete(ix.readyReindex, br)
			ix.mu.Unlock()

			err := ix.reindex(br)
			if err != nil {
				log.Printf("out-of-order reindex(%v) = %v", br, err)
				ix.mu.Lock()
				if len(ix.needs[br]) == 0 {
					ix.readyReindex[br] = true
				}
				ix.mu.Unlock()
			}
		}
	}
}
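
// reindex fetches br from ix.BlobSource and sends it back through
// the indexing path via blobserver.Receive.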
func (ix *Index) reindex(br blob.Ref) error {
	bs := ix.BlobSource
	if bs == nil {
		return fmt.Errorf("index: can't re-index %v: no BlobSource", br)
	}
	rc, _, err := bs.Fetch(br)
	if err != nil {
		return fmt.Errorf("index: failed to fetch %v for reindexing: %v", br, err)
	}
	defer rc.Close()
	if _, err := blobserver.Receive(ix, br, rc); err != nil {
		return err
	}
	return nil
}
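
// A mutationMap accumulates the index mutations (key/value writes and
// delete claims) produced while indexing a single blob. Its contents
// are applied in one batch by (*Index).commit.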
type mutationMap struct {
	kv map[string]string // the keys and values we populate

	// We record if we get a delete claim, so we can update
	// the deletes cache right after committing the mutation.
	//
	// TODO(mpl): we only need to keep track of one claim so far,
	// but I chose a slice for when we need to do multi-claims?
	deletes []schema.Claim
}
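
// Set records that key k should be written with value v when the
// mutation map is committed, lazily allocating kv as needed.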
func (mm *mutationMap) Set(k, v string) {
	if mm.kv == nil {
		mm.kv = make(map[string]string)
	}
	mm.kv[k] = v
}
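
// noteDelete records a delete claim so the deletes cache can be
// updated right after the mutation map is committed.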
func (mm *mutationMap) noteDelete(deleteClaim schema.Claim) {
	mm.deletes = append(mm.deletes, deleteClaim)
}
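
// blobsFilteringOut returns v with all occurrences of x removed,
// filtering in place and reusing v's backing array.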
func blobsFilteringOut(v []blob.Ref, x blob.Ref) []blob.Ref {
	switch len(v) {
	case 0:
		return nil
	case 1:
		if v[0] == x {
			return nil
		}
		return v
	}
	nl := v[:0]
	for _, vb := range v {
		if vb != x {
			nl = append(nl, vb)
		}
	}
	return nl
}
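
// noteBlobIndexed records that br has been indexed. Any blob that was
// waiting only on br is moved to readyReindex and the out-of-order
// indexer loop is tickled, without blocking.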
func (ix *Index) noteBlobIndexed(br blob.Ref) {
	ix.mu.Lock()
	defer ix.mu.Unlock()
	for _, needer := range ix.neededBy[br] {
		newNeeds := blobsFilteringOut(ix.needs[needer], br)
		if len(newNeeds) == 0 {
			ix.readyReindex[needer] = true
			delete(ix.needs, needer)
			select {
			case ix.tickleOoo <- true:
			default:
			}
		} else {
			ix.needs[needer] = newNeeds
		}
	}
	delete(ix.neededBy, br)
}
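
// removeAllMissingEdges deletes all the keyMissing rows for br, which
// recorded the blobs br was waiting on before it could be indexed.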
func (ix *Index) removeAllMissingEdges(br blob.Ref) {
	var toDelete []string
	it := ix.queryPrefix(keyMissing, br)
	for it.Next() {
		toDelete = append(toDelete, it.Key())
	}
	if err := it.Close(); err != nil {
		// TODO: Care? Can lazily clean up later.
		log.Printf("Iterator close error: %v", err)
	}
	for _, k := range toDelete {
		if err := ix.s.Delete(k); err != nil {
			log.Printf("Error deleting key %s: %v", k, err)
		}
	}
}
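
// ReceiveBlob implements the blobserver.BlobReceiver interface. It
// indexes the blob read from source. If some of the blob's
// dependencies aren't indexed yet, it records what's missing and
// reports success anyway, deferring the real indexing until the
// missing blobs arrive.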
func (ix *Index) ReceiveBlob(blobRef blob.Ref, source io.Reader) (retsb blob.SizedRef, err error) {
	missingDeps := false
	defer func() {
		if err == nil {
			ix.noteBlobIndexed(blobRef)
			if !missingDeps {
				ix.removeAllMissingEdges(blobRef)
			}
		}
	}()
	sniffer := NewBlobSniffer(blobRef)
	written, err := io.Copy(sniffer, source)
	if err != nil {
		return
	}
	if _, haveErr := ix.s.Get("have:" + blobRef.String()); haveErr == nil {
		return blob.SizedRef{blobRef, uint32(written)}, nil
	}

	sniffer.Parse()

	fetcher := &missTrackFetcher{
		fetcher: ix.BlobSource,
	}

	mm, err := ix.populateMutationMap(fetcher, blobRef, sniffer)
	if err != nil {
		fetcher.mu.Lock()
		defer fetcher.mu.Unlock()
		if len(fetcher.missing) == 0 {
			return
		}
		missingDeps = true
		allRecorded := true
		for _, missing := range fetcher.missing {
			if err := ix.noteNeeded(blobRef, missing); err != nil {
				allRecorded = false
			}
		}
		if allRecorded {
			// Lie and say things are good. We've
			// successfully recorded that the blob isn't
			// indexed, but we'll reindex it later once
			// the dependent blobs arrive.
			return blob.SizedRef{blobRef, uint32(written)}, nil
		}
		return
	}

	if err := ix.commit(mm); err != nil {
		return retsb, err
	}

	if c := ix.corpus; c != nil {
		if err = c.addBlob(blobRef, mm); err != nil {
			return
		}
	}

	// TODO(bradfitz): log levels? These are generally noisy
	// (especially in tests, like search/handler_test), but I
	// could see it being useful in production. For now, disabled:
	//
	// mimeType := sniffer.MIMEType()
	// log.Printf("indexer: received %s; type=%v; truncated=%v", blobRef, mimeType, sniffer.IsTruncated())

	return blob.SizedRef{blobRef, uint32(written)}, nil
}

// commit writes the contents of the mutationMap on a batch
// mutation and commits that batch. It also updates the deletes
// cache.
func (ix *Index) commit(mm *mutationMap) error {
	// We want the update of the deletes cache to be atomic
	// with the transaction commit, so we lock here instead
	// of within updateDeletesCache.
	ix.deletes.Lock()
	defer ix.deletes.Unlock()
	bm := ix.s.BeginBatch()
	for k, v := range mm.kv {
		bm.Set(k, v)
	}
	err := ix.s.CommitBatch(bm)
	if err != nil {
		return err
	}
	for _, cl := range mm.deletes {
		if err := ix.updateDeletesCache(cl); err != nil {
			return fmt.Errorf("Could not update the deletes cache after deletion from %v: %v", cl, err)
		}
	}
	return nil
}

// populateMutationMap populates keys & values that will be committed
// into the returned map.
//
// The blobref can be trusted at this point (it's been fully consumed
// and verified to match), and the sniffer has been populated.
func (ix *Index) populateMutationMap(fetcher *missTrackFetcher, br blob.Ref, sniffer *BlobSniffer) (*mutationMap, error) {
	// TODO(mpl): shouldn't we remove these two from the map (so they don't get committed) when
	// e.g. in populateClaim we detect a bogus claim (which does not yield an error)?
	mm := &mutationMap{
		kv: map[string]string{
			"have:" + br.String(): fmt.Sprintf("%d", sniffer.Size()),
			"meta:" + br.String(): fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MIMEType()),
		},
	}

	if blob, ok := sniffer.SchemaBlob(); ok {
		switch blob.Type() {
		case "claim":
			if err := ix.populateClaim(fetcher, blob, mm); err != nil {
				return nil, err
			}
		case "file":
			if err := ix.populateFile(fetcher, blob, mm); err != nil {
				return nil, err
			}
		case "directory":
			if err := ix.populateDir(fetcher, blob, mm); err != nil {
				return nil, err
			}
		}
	}

	return mm, nil
}

// keepFirstN keeps the first N bytes written to it in Bytes.
type keepFirstN struct {
	N     int
	Bytes []byte
}

func (w *keepFirstN) Write(p []byte) (n int, err error) {
	if n := w.N - len(w.Bytes); n > 0 {
		if n > len(p) {
			n = len(p)
		}
		w.Bytes = append(w.Bytes, p[:n]...)
	}
	return len(p), nil
}

// missTrackFetcher is a blob.Fetcher that records which blob(s) it
// failed to load from src.
type missTrackFetcher struct {
	fetcher blob.Fetcher

	mu      sync.Mutex // guards missing
	missing []blob.Ref
}
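
// Fetch fetches br from the wrapped fetcher, recording br in
// f.missing when the fetch fails with os.ErrNotExist.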
func (f *missTrackFetcher) Fetch(br blob.Ref) (blob io.ReadCloser, size uint32, err error) {
	blob, size, err = f.fetcher.Fetch(br)
	if err == os.ErrNotExist {
		f.mu.Lock()
		defer f.mu.Unlock()
		f.missing = append(f.missing, br)
	}
	return
}
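
// populateFile adds to mm the keys for the file schema blob b: its
// wholeRef mapping, file info and times, and, where applicable, image
// dimensions, EXIF entries, and audio metadata.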
// b: the parsed file schema blob
// mm: keys to populate
func (ix *Index) populateFile(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) (err error) {
	var times []time.Time // all creation or mod times seen; may be zero
	times = append(times, b.ModTime())

	blobRef := b.BlobRef()
	fr, err := b.NewFileReader(fetcher)
	if err != nil {
		return err
	}
	defer fr.Close()
	mime, reader := magic.MIMETypeFromReader(fr)

	sha1 := sha1.New()
	var copyDest io.Writer = sha1
	var imageBuf *keepFirstN // or nil
	if strings.HasPrefix(mime, "image/") {
		// Empirically derived 1MiB assuming CR2 images require more than any
		// other filetype we support:
		// https://gist.github.com/wathiede/7982372
		imageBuf = &keepFirstN{N: 1 << 20}
		copyDest = io.MultiWriter(copyDest, imageBuf)
	}
	size, err := io.Copy(copyDest, reader)
	if err != nil {
		return err
	}
	wholeRef := blob.RefFromHash(sha1)

	if imageBuf != nil {
		if conf, err := images.DecodeConfig(bytes.NewReader(imageBuf.Bytes)); err == nil {
			mm.Set(keyImageSize.Key(blobRef), keyImageSize.Val(fmt.Sprint(conf.Width), fmt.Sprint(conf.Height)))
		}
		if ft, err := schema.FileTime(bytes.NewReader(imageBuf.Bytes)); err == nil {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
			times = append(times, ft)
		} else {
			log.Printf("filename %q exif = %v, %v", b.FileName(), ft, err)
		}

		indexEXIF(wholeRef, imageBuf.Bytes, mm)
	}

	var sortTimes []time.Time
	for _, t := range times {
		if !t.IsZero() {
			sortTimes = append(sortTimes, t)
		}
	}
	sort.Sort(types.ByTime(sortTimes))
	var time3339s string
	switch {
	case len(sortTimes) == 1:
		time3339s = types.Time3339(sortTimes[0]).String()
	case len(sortTimes) >= 2:
		oldest, newest := sortTimes[0], sortTimes[len(sortTimes)-1]
		time3339s = types.Time3339(oldest).String() + "," + types.Time3339(newest).String()
	}

	mm.Set(keyWholeToFileRef.Key(wholeRef, blobRef), "1")
	mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(size, b.FileName(), mime))
	mm.Set(keyFileTimes.Key(blobRef), keyFileTimes.Val(time3339s))

	if strings.HasPrefix(mime, "audio/") {
		indexMusic(io.NewSectionReader(fr, 0, fr.Size()), wholeRef, mm)
	}

	return nil
}
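
// tagFormatString returns a short name for tag's TIFF value format
// ("int", "rat", "float" or "string"), or "" for formats the indexer
// doesn't handle.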
func tagFormatString(tag *tiff.Tag) string {
	switch tag.Format() {
	case tiff.IntVal:
		return "int"
	case tiff.RatVal:
		return "rat"
	case tiff.FloatVal:
		return "float"
	case tiff.StringVal:
		return "string"
	}
	return ""
}
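
// exifWalkFunc adapts a plain function to the walker interface
// expected by the exif package's Walk method.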
type exifWalkFunc func(name exif.FieldName, tag *tiff.Tag) error

func (f exifWalkFunc) Walk(name exif.FieldName, tag *tiff.Tag) error { return f(name, tag) }
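
// indexEXIF decodes the EXIF data in header and adds a keyEXIFTag row
// to mm for each supported tag, plus a keyEXIFGPS row when the image
// carries GPS coordinates.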
func indexEXIF(wholeRef blob.Ref, header []byte, mm *mutationMap) {
	ex, err := exif.Decode(bytes.NewReader(header))
	if err != nil {
		return
	}
	defer func() {
		// The EXIF library panics if you access a field past
		// what the file contains. Be paranoid and just
		// recover here, instead of crashing on an invalid
		// EXIF file.
		if e := recover(); e != nil {
			log.Printf("Ignoring invalid EXIF file. Caught panic: %v", e)
		}
	}()

	ex.Walk(exifWalkFunc(func(name exif.FieldName, tag *tiff.Tag) error {
		tagFmt := tagFormatString(tag)
		if tagFmt == "" {
			return nil
		}
		key := keyEXIFTag.Key(wholeRef, fmt.Sprintf("%04x", tag.Id))
		numComp := int(tag.Ncomp)
		if tag.Format() == tiff.StringVal {
			numComp = 1
		}
		var val bytes.Buffer
		val.WriteString(keyEXIFTag.Val(tagFmt, numComp, ""))
		if tag.Format() == tiff.StringVal {
			str := tag.StringVal()
			if containsUnsafeRawStrByte(str) {
				val.WriteString(urle(str))
			} else {
				val.WriteString(str)
			}
		} else {
			for i := 0; i < int(tag.Ncomp); i++ {
				if i > 0 {
					val.WriteByte('|')
				}
				switch tagFmt {
				case "int":
					fmt.Fprintf(&val, "%d", tag.Int(i))
				case "rat":
					n, d := tag.Rat2(i)
					fmt.Fprintf(&val, "%d/%d", n, d)
				case "float":
					fmt.Fprintf(&val, "%v", tag.Float(i))
				default:
					panic("shouldn't get here")
				}
			}
		}
		valStr := val.String()
		mm.Set(key, valStr)
		return nil
	}))

	longTag, err := ex.Get(exif.FieldName("GPSLongitude"))
	if err != nil {
		return
	}
	ewTag, err := ex.Get(exif.FieldName("GPSLongitudeRef"))
	if err != nil {
		return
	}
	latTag, err := ex.Get(exif.FieldName("GPSLatitude"))
	if err != nil {
		return
	}
	nsTag, err := ex.Get(exif.FieldName("GPSLatitudeRef"))
	if err != nil {
		return
	}
	long := tagDegrees(longTag)
	lat := tagDegrees(latTag)
	if ewTag.StringVal() == "W" {
		long *= -1.0
	}
	if nsTag.StringVal() == "S" {
		lat *= -1.0
	}
	mm.Set(keyEXIFGPS.Key(wholeRef), keyEXIFGPS.Val(fmt.Sprint(lat), fmt.Sprint(long)))
}
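
// ratFloat converts the rational number num/dem to a float64.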
func ratFloat(num, dem int64) float64 {
	return float64(num) / float64(dem)
}
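
// tagDegrees converts a GPS coordinate tag holding (degrees, minutes,
// seconds) rationals into decimal degrees.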
func tagDegrees(tag *tiff.Tag) float64 {
	return ratFloat(tag.Rat2(0)) + ratFloat(tag.Rat2(1))/60 + ratFloat(tag.Rat2(2))/3600
}

// indexMusic adds mutations to index the wholeRef by attached metadata and other properties.
func indexMusic(r types.SizeReaderAt, wholeRef blob.Ref, mm *mutationMap) {
	tag, err := taglib.Decode(r, r.Size())
	if err != nil {
		log.Print("index: error parsing tag: ", err)
		return
	}

	var footerLength int64 = 0
	if hasTag, err := media.HasID3v1Tag(r); err != nil {
		log.Print("index: unable to check for ID3v1 tag: ", err)
		return
	} else if hasTag {
		footerLength = media.ID3v1TagLength
	}

	// Generate a hash of the audio portion of the file (i.e. excluding ID3v1 and v2 tags).
	audioStart := int64(tag.TagSize())
	audioSize := r.Size() - audioStart - footerLength
	hash := sha1.New()
	if _, err := io.Copy(hash, io.NewSectionReader(r, audioStart, audioSize)); err != nil {
		log.Print("index: error generating SHA1 from audio data: ", err)
		return
	}
	mediaRef := blob.RefFromHash(hash)

	duration, err := media.GetMPEGAudioDuration(io.NewSectionReader(r, audioStart, audioSize))
	if err != nil {
		log.Print("index: unable to calculate audio duration: ", err)
		duration = 0
	}

	var yearStr, trackStr, discStr, durationStr string
	if !tag.Year().IsZero() {
		const justYearLayout = "2006"
		yearStr = tag.Year().Format(justYearLayout)
	}
	if tag.Track() != 0 {
		trackStr = fmt.Sprintf("%d", tag.Track())
	}
	if tag.Disc() != 0 {
		discStr = fmt.Sprintf("%d", tag.Disc())
	}
	if duration != 0 {
		durationStr = fmt.Sprintf("%d", duration/time.Millisecond)
	}

	// Note: if you add to this map, please update
	// pkg/search/query.go's MediaTagConstraint Tag docs.
	tags := map[string]string{
		"title":      tag.Title(),
		"artist":     tag.Artist(),
		"album":      tag.Album(),
		"genre":      tag.Genre(),
		"year":       yearStr,
		"track":      trackStr,
		"disc":       discStr,
		"mediaref":   mediaRef.String(),
		"durationms": durationStr,
	}

	for tag, value := range tags {
		if value != "" {
			mm.Set(keyMediaTag.Key(wholeRef, tag), keyMediaTag.Val(value))
		}
	}
}

// b: the parsed directory schema blob
// mm: keys to populate
func (ix *Index) populateDir(fetcher blob.Fetcher, b *schema.Blob, mm *mutationMap) error {
	blobRef := b.BlobRef()
	// TODO(bradfitz): move the NewDirReader and FileName method off *schema.Blob and onto
	// StaticFile/StaticDirectory or something.

	dr, err := b.NewDirReader(fetcher)
	if err != nil {
		// TODO(bradfitz): propagate up a transient failure
		// error type, so we can retry indexing files in the
		// future if blobs are only temporarily unavailable.
		log.Printf("index: error indexing directory, creating NewDirReader %s: %v", blobRef, err)
		return nil
	}
	sts, err := dr.StaticSet()
	if err != nil {
		log.Printf("index: error indexing directory: can't get StaticSet: %v\n", err)
		return nil
	}

	mm.Set(keyFileInfo.Key(blobRef), keyFileInfo.Val(len(sts), b.FileName(), ""))
	for _, br := range sts {
		mm.Set(keyStaticDirChild.Key(blobRef, br.String()), "1")
	}
	return nil
}

// populateDeleteClaim adds to mm the entries resulting from the delete claim cl.
// It is assumed cl is a valid claim, and vr has already been verified.
func (ix *Index) populateDeleteClaim(cl schema.Claim, vr *jsonsign.VerifyRequest, mm *mutationMap) {
	br := cl.Blob().BlobRef()
	target := cl.Target()
	if !target.Valid() {
		log.Print(fmt.Errorf("no valid target for delete claim %v", br))
		return
	}
	meta, err := ix.GetBlobMeta(target)
	if err != nil {
		if err == os.ErrNotExist {
			// TODO: return a dependency error type, to schedule re-indexing in the future
		}
		log.Print(fmt.Errorf("Could not get mime type of target blob %v: %v", target, err))
		return
	}
	// TODO(mpl): create consts somewhere for "claim" and "permanode" as camliTypes, and use them,
	// instead of hardcoding. Unless they already exist? (didn't find them).
	if meta.CamliType != "permanode" && meta.CamliType != "claim" {
		log.Print(fmt.Errorf("delete claim target in %v is neither a permanode nor a claim: %v", br, meta.CamliType))
		return
	}
	mm.Set(keyDeleted.Key(target, cl.ClaimDateString(), br), "")
	if meta.CamliType == "claim" {
		return
	}
	recentKey := keyRecentPermanode.Key(vr.SignerKeyId, cl.ClaimDateString(), br)
	mm.Set(recentKey, target.String())
	attr, value := cl.Attribute(), cl.Value()
	claimKey := keyPermanodeClaim.Key(target, vr.SignerKeyId, cl.ClaimDateString(), br)
	mm.Set(claimKey, keyPermanodeClaim.Val(cl.ClaimType(), attr, value, vr.CamliSigner))
}
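
// populateClaim verifies the signature of the claim schema blob b and
// adds the claim's index entries to mm: the signer key mapping,
// recent-permanode and permanode-claim rows, camliPath edges, indexed
// attribute values, and blob-reference edges. Delete claims are
// handled via populateDeleteClaim.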
func (ix *Index) populateClaim(fetcher *missTrackFetcher, b *schema.Blob, mm *mutationMap) error {
	br := b.BlobRef()

	claim, ok := b.AsClaim()
	if !ok {
		// Skip bogus claim with malformed permanode.
		return nil
	}

	vr := jsonsign.NewVerificationRequest(b.JSON(), blob.NewSerialFetcher(ix.KeyFetcher, fetcher))
	if !vr.Verify() {
		// TODO(bradfitz): ask if the vr.Err.(jsonsign.Error).IsPermanent() and retry
		// later if it's not permanent? or maybe do this up a level?
		if vr.Err != nil {
			return vr.Err
		}
		return errors.New("index: populateClaim verification failure")
	}
	verifiedKeyId := vr.SignerKeyId
	mm.Set("signerkeyid:"+vr.CamliSigner.String(), verifiedKeyId)

	if claim.ClaimType() == string(schema.DeleteClaim) {
		ix.populateDeleteClaim(claim, vr, mm)
		mm.noteDelete(claim)
		return nil
	}

	pnbr := claim.ModifiedPermanode()
	if !pnbr.Valid() {
		// A different type of claim; not modifying a permanode.
		return nil
	}

	attr, value := claim.Attribute(), claim.Value()
	recentKey := keyRecentPermanode.Key(verifiedKeyId, claim.ClaimDateString(), br)
	mm.Set(recentKey, pnbr.String())
	claimKey := keyPermanodeClaim.Key(pnbr, verifiedKeyId, claim.ClaimDateString(), br)
	mm.Set(claimKey, keyPermanodeClaim.Val(claim.ClaimType(), attr, value, vr.CamliSigner))

	if strings.HasPrefix(attr, "camliPath:") {
		targetRef, ok := blob.Parse(value)
		if ok {
			// TODO: deal with set-attribute vs. del-attribute
			// properly? I think we get it for free when
			// del-attribute has no Value, but we need to deal
			// with the case where they explicitly delete the
			// current value.
			suffix := attr[len("camliPath:"):]
			active := "Y"
			if claim.ClaimType() == "del-attribute" {
				active = "N"
			}
			baseRef := pnbr
			claimRef := br

			key := keyPathBackward.Key(verifiedKeyId, targetRef, claimRef)
			val := keyPathBackward.Val(claim.ClaimDateString(), baseRef, active, suffix)
			mm.Set(key, val)

			key = keyPathForward.Key(verifiedKeyId, baseRef, suffix, claim.ClaimDateString(), claimRef)
			val = keyPathForward.Val(active, targetRef)
			mm.Set(key, val)
		}
	}

	if claim.ClaimType() != string(schema.DelAttributeClaim) && IsIndexedAttribute(attr) {
		key := keySignerAttrValue.Key(verifiedKeyId, attr, value, claim.ClaimDateString(), br)
		mm.Set(key, keySignerAttrValue.Val(pnbr))
	}

	if IsBlobReferenceAttribute(attr) {
		targetRef, ok := blob.Parse(value)
		if ok {
			key := keyEdgeBackward.Key(targetRef, pnbr, br)
			mm.Set(key, keyEdgeBackward.Val("permanode", ""))
		}
	}

	return nil
}

// updateDeletesCache updates the index deletes cache with deleteClaim.
// deleteClaim is trusted to be a valid delete Claim.
func (x *Index) updateDeletesCache(deleteClaim schema.Claim) error {
	target := deleteClaim.Target()
	deleter := deleteClaim.Blob()
	when, err := deleter.ClaimDate()
	if err != nil {
		return fmt.Errorf("Could not get date of delete claim %v: %v", deleteClaim, err)
	}
	targetDeletions := append(x.deletes.m[target],
		deletion{
			deleter: deleter.BlobRef(),
			when:    when,
		})
	sort.Sort(sort.Reverse(byDeletionDate(targetDeletions)))
	x.deletes.m[target] = targetDeletions
	return nil
}