/*
Copyright 2013 The Perkeep Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
|
|
|
|
|
package index

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"log"
	"os"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"perkeep.org/internal/osutil"
	"perkeep.org/pkg/blob"
	"perkeep.org/pkg/schema"
	"perkeep.org/pkg/schema/nodeattr"
	"perkeep.org/pkg/sorted"
	"perkeep.org/pkg/types/camtypes"

	"go4.org/strutil"
	"go4.org/syncutil"
)
|
|
|
|
|
|
|
|
// Corpus is an in-memory summary of all of a user's blobs' metadata.
//
// A Corpus is not safe for concurrent use. Callers should use Lock or RLock
// on the parent index instead.
type Corpus struct {
	// building is true at start while scanning all rows in the
	// index. While building, certain invariants (like things
	// being sorted) can be temporarily violated and fixed at the
	// end of scan.
	building bool

	// hasLegacySHA1 reports whether some SHA-1 blobs are indexed. It is set while
	// building the corpus from the initial index scan.
	hasLegacySHA1 bool

	// gen is incremented on every blob received.
	// It's used as a query cache invalidator.
	gen int64

	strs      map[string]string   // interned strings
	brOfStr   map[string]blob.Ref // blob.Parse fast path; dropped after the initial scan
	brInterns int64               // blob.Ref -> blob.Ref, via br method

	// blobs maps each indexed blobref to its metadata.
	blobs        map[blob.Ref]*camtypes.BlobMeta
	sumBlobBytes int64

	// camBlobs maps from camliType ("file") to blobref to the meta.
	// The value is the same one in blobs.
	camBlobs map[schema.CamliType]map[blob.Ref]*camtypes.BlobMeta

	// TODO: add GoLLRB to vendor; keep sorted BlobMeta
	keyId signerFromBlobrefMap

	// signerRefs maps a signer GPG ID to all its signer blobs (because different hashes).
	signerRefs map[string]SignerRefSet
	files      map[blob.Ref]camtypes.FileInfo // keyed by file or directory schema blob
	permanodes map[blob.Ref]*PermanodeMeta

	imageInfo    map[blob.Ref]camtypes.ImageInfo // keyed by fileref (not wholeref)
	fileWholeRef map[blob.Ref]blob.Ref           // fileref -> its wholeref (TODO: multi-valued?)
	gps          map[blob.Ref]latLong            // wholeRef -> GPS coordinates

	// dirChildren maps a directory to its (direct) children (static-set entries).
	dirChildren map[blob.Ref]map[blob.Ref]struct{}

	// fileParents maps a file or directory to its (direct) parents.
	fileParents map[blob.Ref]map[blob.Ref]struct{}

	// Lack of edge tracking implementation is issue #707
	// (https://github.com/perkeep/perkeep/issues/707)

	// claimBack allows hopping backwards from a Claim's Value
	// when the Value is a blobref. It allows, for example,
	// finding the parents of camliMember claims. If a permanode
	// parent set A has a camliMembers B and C, it allows finding
	// A from either B and C.
	// The slice is not sorted.
	claimBack map[blob.Ref][]*camtypes.Claim

	// TODO: use deletedCache instead?
	deletedBy map[blob.Ref]blob.Ref // key is deleted by value

	// deletes tracks deletions of claims and permanodes. The key is
	// the blobref of a claim or permanode. The values, sorted newest first,
	// contain the blobref of the claim responsible for the deletion, as well
	// as the date when that deletion happened.
	deletes map[blob.Ref][]deletion

	mediaTags map[blob.Ref]map[string]string // wholeref -> "album" -> "foo"

	permanodesByTime    *lazySortedPermanodes // cache of permanodes sorted by creation time.
	permanodesByModtime *lazySortedPermanodes // cache of permanodes sorted by modtime.

	// permanodesSetByNodeType maps from a camliNodeType attribute
	// value to the set of permanodes that ever had that
	// value. The bool is always true.
	permanodesSetByNodeType map[string]map[blob.Ref]bool

	// scratch string slice
	ss []string
}
|
|
|
|
|
2018-04-22 15:51:11 +00:00
|
|
|
// logf logs to the standard logger with an "index/corpus: " prefix.
func (c *Corpus) logf(format string, args ...interface{}) {
	log.Printf("index/corpus: "+format, args...)
}
|
|
|
|
|
2018-01-19 18:53:44 +00:00
|
|
|
// blobMatches reports whether br is in the set.
|
2018-01-31 18:30:47 +00:00
|
|
|
func (srs SignerRefSet) blobMatches(br blob.Ref) bool {
|
2018-01-19 18:53:44 +00:00
|
|
|
for _, v := range srs {
|
|
|
|
if br.EqualString(v) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// signerFromBlobrefMap maps a signer blobRef to the signer's GPG ID (e.g.
// 2931A67C26F5ABDA). It is needed because the signer on a claim is represented by
// its blobRef, but the same signer could have created claims with different hashes
// (e.g. with sha1 and with sha224), so these claims would look as if created by
// different signers (because different blobRefs). signerID thus allows the
// algorithms to rely on the unique GPG ID of a signer instead of the different
// blobRef representations of it. Its value is usually the corpus keyId.
type signerFromBlobrefMap map[blob.Ref]string
|
|
|
|
|
2013-12-24 00:21:19 +00:00
|
|
|
// latLong is a latitude/longitude coordinate pair.
type latLong struct {
	lat, long float64
}
|
|
|
|
|
2014-02-13 00:39:53 +00:00
|
|
|
// IsDeleted reports whether the provided blobref (of a permanode or claim) should be considered deleted.
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) IsDeleted(br blob.Ref) bool {
|
2014-02-13 00:39:53 +00:00
|
|
|
for _, v := range c.deletes[br] {
|
2016-04-22 04:34:24 +00:00
|
|
|
if !c.IsDeleted(v.deleter) {
|
2014-02-13 00:39:53 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2013-11-16 20:55:09 +00:00
|
|
|
// PermanodeMeta is the in-memory metadata for a permanode: its claims
// and the attribute values that result from applying them.
type PermanodeMeta struct {
	Claims []*camtypes.Claim // sorted by camtypes.ClaimsByDate

	attr attrValues // attributes from all signers

	// signer maps a signer's GPG ID (e.g. 2931A67C26F5ABDA) to the attrs for this
	// signer.
	signer map[string]attrValues
}
|
|
|
|
|
|
|
|
// attrValues maps a permanode attribute name to its values, in claim order.
type attrValues map[string][]string
|
|
|
|
|
|
|
|
// cacheAttrClaim applies attribute changes from cl.
|
|
|
|
func (m attrValues) cacheAttrClaim(cl *camtypes.Claim) {
|
|
|
|
switch cl.Type {
|
|
|
|
case string(schema.SetAttributeClaim):
|
|
|
|
m[cl.Attr] = []string{cl.Value}
|
|
|
|
case string(schema.AddAttributeClaim):
|
|
|
|
m[cl.Attr] = append(m[cl.Attr], cl.Value)
|
|
|
|
case string(schema.DelAttributeClaim):
|
|
|
|
if cl.Value == "" {
|
|
|
|
delete(m, cl.Attr)
|
|
|
|
} else {
|
|
|
|
a, i := m[cl.Attr], 0
|
|
|
|
for _, v := range a {
|
|
|
|
if v != cl.Value {
|
|
|
|
a[i] = v
|
|
|
|
i++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
m[cl.Attr] = a[:i]
|
|
|
|
}
|
|
|
|
}
|
2016-01-27 16:09:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// restoreInvariants sorts claims by date and
|
|
|
|
// recalculates latest attributes.
|
2018-01-19 18:53:44 +00:00
|
|
|
func (pm *PermanodeMeta) restoreInvariants(signers signerFromBlobrefMap) error {
|
2016-01-27 16:09:18 +00:00
|
|
|
sort.Sort(camtypes.ClaimPtrsByDate(pm.Claims))
|
2016-05-12 04:05:43 +00:00
|
|
|
pm.attr = make(attrValues)
|
2018-01-17 18:02:03 +00:00
|
|
|
pm.signer = make(map[string]attrValues)
|
2016-01-27 16:09:18 +00:00
|
|
|
for _, cl := range pm.Claims {
|
2018-01-17 18:02:03 +00:00
|
|
|
if err := pm.appendAttrClaim(cl, signers); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-01-27 16:09:18 +00:00
|
|
|
}
|
2018-01-17 18:02:03 +00:00
|
|
|
return nil
|
2016-01-27 16:09:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// fixupLastClaim fixes invariants on the assumption
|
|
|
|
// that the all but the last element in Claims are sorted by date
|
|
|
|
// and the last element is the only one not yet included in Attrs.
|
2018-01-19 18:53:44 +00:00
|
|
|
func (pm *PermanodeMeta) fixupLastClaim(signers signerFromBlobrefMap) error {
|
2016-05-12 04:05:43 +00:00
|
|
|
if pm.attr != nil {
|
2016-01-27 16:09:18 +00:00
|
|
|
n := len(pm.Claims)
|
|
|
|
if n < 2 || camtypes.ClaimPtrsByDate(pm.Claims).Less(n-2, n-1) {
|
|
|
|
// already sorted, update Attrs from new Claim
|
2018-01-17 18:02:03 +00:00
|
|
|
return pm.appendAttrClaim(pm.Claims[n-1], signers)
|
2016-01-27 16:09:18 +00:00
|
|
|
}
|
|
|
|
}
|
2018-01-17 18:02:03 +00:00
|
|
|
return pm.restoreInvariants(signers)
|
2016-01-27 16:09:18 +00:00
|
|
|
}
|
|
|
|
|
2016-05-12 04:05:43 +00:00
|
|
|
// appendAttrClaim stores permanode attributes
// from cl in pm.attr and pm.signer[signerID[cl.Signer]].
// The caller of appendAttrClaim is responsible for calling
// it with claims sorted in camtypes.ClaimPtrsByDate order.
// It returns an error if cl's signer is not present in signers.
func (pm *PermanodeMeta) appendAttrClaim(cl *camtypes.Claim, signers signerFromBlobrefMap) error {
	signer, ok := signers[cl.Signer]
	if !ok {
		return fmt.Errorf("claim %v has unknown signer %q", cl.BlobRef, cl.Signer)
	}
	sc, ok := pm.signer[signer]
	if !ok {
		// Optimize for the case where cl.Signer of all claims are the same.
		// Instead of having two identical attrValues copies in
		// pm.attr and pm.signer[cl.Signer],
		// use a single attrValues
		// until there is at least a second signer.
		switch len(pm.signer) {
		case 0:
			// Set up signer cache to reference
			// the existing attrValues.
			pm.attr.cacheAttrClaim(cl)
			pm.signer[signer] = pm.attr
			return nil

		case 1:
			// pm.signer has exactly one other signer,
			// and its attrValues entry references pm.attr.
			// Make a copy of pm.attr
			// for this other signer now.
			m := make(attrValues)
			for a, v := range pm.attr {
				xv := make([]string, len(v))
				copy(xv, v)
				m[a] = xv
			}

			// There is exactly one entry; give it the detached copy.
			for sig := range pm.signer {
				pm.signer[sig] = m
				break
			}
		}
		sc = make(attrValues)
		pm.signer[signer] = sc
	}

	pm.attr.cacheAttrClaim(cl)

	// Cache claim in sc only if sc != pm.attr.
	if len(pm.signer) > 1 {
		sc.cacheAttrClaim(cl)
	}
	return nil
}
|
|
|
|
|
2016-11-02 22:28:36 +00:00
|
|
|
// valuesAtSigner returns an attrValues to query permanode attr values at the
|
2018-01-17 18:02:03 +00:00
|
|
|
// given time for the signerFilter, which is the GPG ID of a signer (e.g. 2931A67C26F5ABDA).
|
|
|
|
// It returns (nil, true) if signerFilter is not empty but pm has no
|
|
|
|
// attributes for it (including if signerFilter is unknown).
|
2016-11-02 22:28:36 +00:00
|
|
|
// It returns ok == true if v represents attrValues valid for the specified
|
|
|
|
// parameters.
|
|
|
|
// It returns (nil, false) if neither pm.attr nor pm.signer should be used for
|
|
|
|
// the given time, because e.g. some claims are more recent than this time. In
|
|
|
|
// which case, the caller should resort to querying another source, such as pm.Claims.
|
2016-05-12 04:05:43 +00:00
|
|
|
// The returned map must not be changed by the caller.
|
2016-05-12 04:05:43 +00:00
|
|
|
func (pm *PermanodeMeta) valuesAtSigner(at time.Time,
|
2018-01-17 18:02:03 +00:00
|
|
|
signerFilter string) (v attrValues, ok bool) {
|
2016-05-12 04:05:43 +00:00
|
|
|
|
|
|
|
if pm.attr == nil {
|
|
|
|
return nil, false
|
|
|
|
}
|
2018-01-17 18:02:03 +00:00
|
|
|
|
2016-05-12 04:05:43 +00:00
|
|
|
var m attrValues
|
2018-01-17 18:02:03 +00:00
|
|
|
if signerFilter != "" {
|
2016-05-12 04:05:43 +00:00
|
|
|
m = pm.signer[signerFilter]
|
|
|
|
if m == nil {
|
|
|
|
return nil, true
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
m = pm.attr
|
2016-01-27 16:09:18 +00:00
|
|
|
}
|
|
|
|
if at.IsZero() {
|
2016-05-12 04:05:43 +00:00
|
|
|
return m, true
|
2016-01-27 16:09:18 +00:00
|
|
|
}
|
|
|
|
if n := len(pm.Claims); n == 0 || !pm.Claims[n-1].Date.After(at) {
|
2016-05-12 04:05:43 +00:00
|
|
|
return m, true
|
2016-01-27 16:09:18 +00:00
|
|
|
}
|
2016-05-12 04:05:43 +00:00
|
|
|
return nil, false
|
2013-11-16 20:55:09 +00:00
|
|
|
}
|
|
|
|
|
2013-11-16 23:18:16 +00:00
|
|
|
// newCorpus returns a new, empty Corpus with all of its maps and its
// lazily-sorted permanode caches initialized.
func newCorpus() *Corpus {
	c := &Corpus{
		blobs:                   make(map[blob.Ref]*camtypes.BlobMeta),
		camBlobs:                make(map[schema.CamliType]map[blob.Ref]*camtypes.BlobMeta),
		files:                   make(map[blob.Ref]camtypes.FileInfo),
		permanodes:              make(map[blob.Ref]*PermanodeMeta),
		imageInfo:               make(map[blob.Ref]camtypes.ImageInfo),
		deletedBy:               make(map[blob.Ref]blob.Ref),
		keyId:                   make(map[blob.Ref]string),
		signerRefs:              make(map[string]SignerRefSet),
		brOfStr:                 make(map[string]blob.Ref),
		fileWholeRef:            make(map[blob.Ref]blob.Ref),
		gps:                     make(map[blob.Ref]latLong),
		mediaTags:               make(map[blob.Ref]map[string]string),
		deletes:                 make(map[blob.Ref][]deletion),
		claimBack:               make(map[blob.Ref][]*camtypes.Claim),
		permanodesSetByNodeType: make(map[string]map[blob.Ref]bool),
		dirChildren:             make(map[blob.Ref]map[blob.Ref]struct{}),
		fileParents:             make(map[blob.Ref]map[blob.Ref]struct{}),
	}
	c.permanodesByModtime = &lazySortedPermanodes{
		c:      c,
		pnTime: c.PermanodeModtime,
	}
	c.permanodesByTime = &lazySortedPermanodes{
		c:      c,
		pnTime: c.PermanodeAnyTime,
	}
	return c
}
|
|
|
|
|
2013-11-23 07:24:54 +00:00
|
|
|
func NewCorpusFromStorage(s sorted.KeyValue) (*Corpus, error) {
|
2013-11-16 23:18:16 +00:00
|
|
|
if s == nil {
|
|
|
|
return nil, errors.New("storage is nil")
|
|
|
|
}
|
|
|
|
c := newCorpus()
|
2013-11-17 17:41:45 +00:00
|
|
|
return c, c.scanFromStorage(s)
|
2013-11-16 20:55:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// KeepInMemory loads the index's rows into an in-memory Corpus,
// stores it on x, and returns it.
func (x *Index) KeepInMemory() (*Corpus, error) {
	var err error
	x.corpus, err = NewCorpusFromStorage(x.s)
	return x.corpus, err
}
|
2013-11-17 01:24:02 +00:00
|
|
|
|
2013-11-17 02:50:01 +00:00
|
|
|
// PreventStorageAccessForTesting causes any access to the index's underlying
// Storage interface to panic.
func (x *Index) PreventStorageAccessForTesting() {
	x.s = crashStorage{}
}
|
|
|
|
|
|
|
|
// crashStorage is a sorted.KeyValue whose read methods panic. It backs
// PreventStorageAccessForTesting.
type crashStorage struct {
	sorted.KeyValue
}
|
|
|
|
|
2013-11-25 01:45:45 +00:00
|
|
|
// Get always panics; tests use it to detect unexpected storage reads.
func (crashStorage) Get(key string) (string, error) {
	panic(fmt.Sprintf("unexpected KeyValue.Get(%q) called", key))
}
|
|
|
|
|
2013-12-07 16:43:18 +00:00
|
|
|
// Find always panics; tests use it to detect unexpected storage scans.
func (crashStorage) Find(start, end string) sorted.Iterator {
	panic(fmt.Sprintf("unexpected KeyValue.Find(%q, %q) called", start, end))
}
|
|
|
|
|
2013-11-17 17:41:45 +00:00
|
|
|
// *********** Updating the corpus
|
|
|
|
|
2013-12-07 19:08:19 +00:00
|
|
|
// corpusMergeFunc maps an index key type (see typeOfKey) to the function
// that merges a row of that type into the corpus. A nil value means rows
// of that type are recognized but deliberately not loaded.
var corpusMergeFunc = map[string]func(c *Corpus, k, v []byte) error{
	"have":                 nil, // redundant with "meta"
	"recpn":                nil, // unneeded.
	"meta":                 (*Corpus).mergeMetaRow,
	keySignerKeyID.name:    (*Corpus).mergeSignerKeyIdRow,
	"claim":                (*Corpus).mergeClaimRow,
	"fileinfo":             (*Corpus).mergeFileInfoRow,
	keyFileTimes.name:      (*Corpus).mergeFileTimesRow,
	"imagesize":            (*Corpus).mergeImageSizeRow,
	"wholetofile":          (*Corpus).mergeWholeToFileRow,
	"exifgps":              (*Corpus).mergeEXIFGPSRow,
	"exiftag":              nil, // not using any for now
	"signerattrvalue":      nil, // ignoring for now
	"mediatag":             (*Corpus).mergeMediaTag,
	keyStaticDirChild.name: (*Corpus).mergeStaticDirChildRow,
}
|
|
|
|
|
2013-11-28 00:58:05 +00:00
|
|
|
// memstats forces a garbage collection and then returns a snapshot of
// the runtime's current memory statistics.
func memstats() *runtime.MemStats {
	var stats runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&stats)
	return &stats
}
|
|
|
|
|
2013-12-04 05:38:03 +00:00
|
|
|
// logCorpusStats controls whether corpus loading logs timing and
// memory statistics.
var logCorpusStats = true // set to false in tests
|
|
|
|
|
2013-12-30 00:27:32 +00:00
|
|
|
// slurpPrefixes lists the index row prefixes that are loaded into the
// corpus at startup.
var slurpPrefixes = []string{
	"meta:", // must be first
	keySignerKeyID.name + ":",

	// the first two above are loaded serially first for dependency reasons, whereas
	// the others below are loaded concurrently afterwards.
	"claim|",
	"fileinfo|",
	keyFileTimes.name + "|",
	"imagesize|",
	"wholetofile|",
	"exifgps|",
	"mediatag|",
	keyStaticDirChild.name + "|",
}
|
|
|
|
|
|
|
|
// Key types (without trailing punctuation) that we slurp to memory at start.
var slurpedKeyType = make(map[string]bool)

// init derives slurpedKeyType from slurpPrefixes so membership tests
// during merging don't need to parse prefixes.
func init() {
	for _, prefix := range slurpPrefixes {
		slurpedKeyType[typeOfKey(prefix)] = true
	}
}
|
|
|
|
|
2013-11-23 07:24:54 +00:00
|
|
|
// scanFromStorage populates the corpus from all the slurped rows of s.
// It sets c.building for the duration of the scan, which relaxes some
// invariants (e.g. claim ordering) that are restored once the scan is done.
// The "meta" and signer-key rows are scanned serially first (the later
// prefixes depend on them); the remaining prefixes load concurrently.
func (c *Corpus) scanFromStorage(s sorted.KeyValue) error {
	c.building = true

	var ms0 *runtime.MemStats
	if logCorpusStats {
		ms0 = memstats()
		c.logf("loading into memory...")
		c.logf("loading into memory... (1/%d: meta rows)", len(slurpPrefixes))
	}

	scanmu := new(sync.Mutex)

	// We do the "meta" rows first, before the prefixes below, because it
	// populates the blobs map (used for blobref interning) and the camBlobs
	// map (used for hinting the size of other maps)
	if err := c.scanPrefix(scanmu, s, "meta:"); err != nil {
		return err
	}

	// we do the keyIDs first, because they're necessary to properly merge claims
	if err := c.scanPrefix(scanmu, s, keySignerKeyID.name+":"); err != nil {
		return err
	}

	// Re-make files and permanodes with capacity hints now that the meta
	// scan has told us how many schema blobs of each type exist.
	c.files = make(map[blob.Ref]camtypes.FileInfo, len(c.camBlobs[schema.TypeFile]))
	c.permanodes = make(map[blob.Ref]*PermanodeMeta, len(c.camBlobs[schema.TypePermanode]))
	cpu0 := osutil.CPUUsage()

	var grp syncutil.Group
	for i, prefix := range slurpPrefixes[2:] {
		if logCorpusStats {
			c.logf("loading into memory... (%d/%d: prefix %q)", i+2, len(slurpPrefixes),
				prefix[:len(prefix)-1])
		}
		prefix := prefix // capture for the goroutine below
		grp.Go(func() error { return c.scanPrefix(scanmu, s, prefix) })
	}
	if err := grp.Err(); err != nil {
		return err
	}

	// Post-load optimizations and restoration of invariants.
	for _, pm := range c.permanodes {
		// Restore invariants violated during building:
		if err := pm.restoreInvariants(c.keyId); err != nil {
			return err
		}

		// And intern some stuff.
		for _, cl := range pm.Claims {
			cl.BlobRef = c.br(cl.BlobRef)
			cl.Signer = c.br(cl.Signer)
			cl.Permanode = c.br(cl.Permanode)
			cl.Target = c.br(cl.Target)
		}
	}
	c.brOfStr = nil // drop this now.
	c.building = false
	// log.V(1).Printf("interned blob.Ref = %d", c.brInterns)

	if err := c.initDeletes(s); err != nil {
		return fmt.Errorf("Could not populate the corpus deletes: %v", err)
	}

	if logCorpusStats {
		cpu := osutil.CPUUsage() - cpu0
		ms1 := memstats()
		memUsed := ms1.Alloc - ms0.Alloc
		if ms1.Alloc < ms0.Alloc {
			// GC may have run; avoid reporting a bogus (wrapped) delta.
			memUsed = 0
		}
		c.logf("stats: %.3f MiB mem: %d blobs (%.3f GiB) (%d schema (%d permanode, %d file (%d image), ...)",
			float64(memUsed)/(1<<20),
			len(c.blobs),
			float64(c.sumBlobBytes)/(1<<30),
			c.numSchemaBlobs(),
			len(c.permanodes),
			len(c.files),
			len(c.imageInfo))
		c.logf("scanning CPU usage: %v", cpu)
	}

	return nil
}
|
2013-11-17 02:50:01 +00:00
|
|
|
|
2014-02-13 00:39:53 +00:00
|
|
|
// initDeletes populates the corpus deletes from the delete entries in s.
// The iterator's Close error, if any, is returned via the named result.
func (c *Corpus) initDeletes(s sorted.KeyValue) (err error) {
	it := queryPrefix(s, keyDeleted)
	defer closeIterator(it, &err)
	for it.Next() {
		cl, ok := kvDeleted(it.Key())
		if !ok {
			return fmt.Errorf("Bogus keyDeleted entry key: want |\"deleted\"|<deleted blobref>|<reverse claimdate>|<deleter claim>|, got %q", it.Key())
		}
		// Keep the deletions for each target sorted newest first.
		targetDeletions := append(c.deletes[cl.Target],
			deletion{
				deleter: cl.BlobRef,
				when:    cl.Date,
			})
		sort.Sort(sort.Reverse(byDeletionDate(targetDeletions)))
		c.deletes[cl.Target] = targetDeletions
	}
	return err
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) numSchemaBlobs() (n int64) {
|
2013-11-28 19:55:52 +00:00
|
|
|
for _, m := range c.camBlobs {
|
|
|
|
n += int64(len(m))
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// scanPrefix reads all rows with the given prefix from s and merges them
// into the corpus with that prefix's registered merge function. mu guards
// the corpus; it is acquired lazily on the first row (and held, via defer,
// until the scan finishes), so empty prefixes never take the lock.
// It panics if the prefix has no entry in corpusMergeFunc.
func (c *Corpus) scanPrefix(mu *sync.Mutex, s sorted.KeyValue, prefix string) (err error) {
	typeKey := typeOfKey(prefix)
	fn, ok := corpusMergeFunc[typeKey]
	if !ok {
		panic("No registered merge func for prefix " + prefix)
	}

	n, t0 := 0, time.Now()
	it := queryPrefixString(s, prefix)
	defer closeIterator(it, &err)
	for it.Next() {
		n++
		if n == 1 {
			// First row for this prefix: lock the corpus for the rest
			// of the scan.
			mu.Lock()
			defer mu.Unlock()
		}
		if typeKey == keySignerKeyID.name {
			// Signer rows go through addKeyID so that signerRefs stays
			// consistent with keyId.
			signerBlobRef, ok := blob.Parse(strings.TrimPrefix(it.Key(), keySignerKeyID.name+":"))
			if !ok {
				c.logf("WARNING: bogus signer blob in %v row: %q", keySignerKeyID.name, it.Key())
				continue
			}
			if err := c.addKeyID(&mutationMap{
				signerBlobRef: signerBlobRef,
				signerID:      it.Value(),
			}); err != nil {
				return err
			}
		} else {
			if err := fn(c, it.KeyBytes(), it.ValueBytes()); err != nil {
				return err
			}
		}
	}
	if logCorpusStats {
		d := time.Since(t0)
		c.logf("loaded prefix %q: %d rows, %v", prefix[:len(prefix)-1], n, d)
	}
	return nil
}
|
|
|
|
|
2018-01-17 18:02:03 +00:00
|
|
|
// addKeyID records the association between mm's signer blobRef and its GPG
// key ID, both in c.keyId (via mergeSignerKeyIdRow) and in c.signerRefs.
// It is a no-op when mm carries no signer information, and it errors if a
// different ID was already recorded for the same signer blobRef.
func (c *Corpus) addKeyID(mm *mutationMap) error {
	if mm.signerID == "" || !mm.signerBlobRef.Valid() {
		return nil
	}
	id, ok := c.keyId[mm.signerBlobRef]
	// only add it if we don't already have it, to save on allocs.
	if ok {
		if id != mm.signerID {
			return fmt.Errorf("GPG ID mismatch for signer %q: refusing to overwrite %v with %v", mm.signerBlobRef, id, mm.signerID)
		}
		return nil
	}
	c.signerRefs[mm.signerID] = append(c.signerRefs[mm.signerID], mm.signerBlobRef.String())
	return c.mergeSignerKeyIdRow([]byte("signerkeyid:"+mm.signerBlobRef.String()), []byte(mm.signerID))
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// addBlob merges the index mutations in mm, for blob br, into the corpus.
// It is a no-op if br was already added. Every new blob increments c.gen,
// the query cache invalidator.
func (c *Corpus) addBlob(ctx context.Context, br blob.Ref, mm *mutationMap) error {
	if _, dup := c.blobs[br]; dup {
		return nil
	}
	c.gen++
	// make sure keySignerKeyID is done first before the actual mutations, even
	// though it's also going to be done in the loop below.
	if err := c.addKeyID(mm); err != nil {
		return err
	}
	for k, v := range mm.kv {
		kt := typeOfKey(k)
		if kt == keySignerKeyID.name {
			// because we already took care of it in addKeyID
			continue
		}
		if !slurpedKeyType[kt] {
			// Row types we don't keep in memory.
			continue
		}
		if err := corpusMergeFunc[kt](c, []byte(k), []byte(v)); err != nil {
			return err
		}
	}
	for _, cl := range mm.deletes {
		if err := c.updateDeletes(cl); err != nil {
			return fmt.Errorf("Could not update the deletes cache after deletion from %v: %v", cl, err)
		}
	}
	return nil
}
|
|
|
|
|
|
|
|
// updateDeletes updates the corpus deletes with the delete claim deleteClaim.
|
|
|
|
// deleteClaim is trusted to be a valid delete Claim.
|
|
|
|
func (c *Corpus) updateDeletes(deleteClaim schema.Claim) error {
|
|
|
|
target := c.br(deleteClaim.Target())
|
|
|
|
deleter := deleteClaim.Blob()
|
|
|
|
when, err := deleter.ClaimDate()
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("Could not get date of delete claim %v: %v", deleteClaim, err)
|
|
|
|
}
|
|
|
|
del := deletion{
|
|
|
|
deleter: c.br(deleter.BlobRef()),
|
|
|
|
when: when,
|
|
|
|
}
|
|
|
|
for _, v := range c.deletes[target] {
|
|
|
|
if v == del {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
targetDeletions := append(c.deletes[target], del)
|
|
|
|
sort.Sort(sort.Reverse(byDeletionDate(targetDeletions)))
|
|
|
|
c.deletes[target] = targetDeletions
|
2013-11-17 17:41:45 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-12-07 19:08:19 +00:00
|
|
|
func (c *Corpus) mergeMetaRow(k, v []byte) error {
|
2013-12-04 19:58:59 +00:00
|
|
|
bm, ok := kvBlobMeta_bytes(k, v)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("bogus meta row: %q -> %q", k, v)
|
|
|
|
}
|
|
|
|
return c.mergeBlobMeta(bm)
|
|
|
|
}
|
|
|
|
|
|
|
|
// mergeBlobMeta records bm in the corpus blob maps and updates the
// total byte accounting. It panics on a duplicate ref; addBlob is
// responsible for filtering dups before calling in.
func (c *Corpus) mergeBlobMeta(bm camtypes.BlobMeta) error {
	if _, dup := c.blobs[bm.Ref]; dup {
		panic("dup blob seen")
	}
	// Intern the camliType string to share storage across blobs.
	bm.CamliType = schema.CamliType((c.str(string(bm.CamliType))))

	c.blobs[bm.Ref] = &bm
	c.sumBlobBytes += int64(bm.Size)
	if bm.CamliType != "" {
		// Also index schema blobs by their camliType.
		m, ok := c.camBlobs[bm.CamliType]
		if !ok {
			m = make(map[blob.Ref]*camtypes.BlobMeta)
			c.camBlobs[bm.CamliType] = m
		}
		m[bm.Ref] = &bm
	}
	return nil
}
|
|
|
|
|
2013-12-07 19:08:19 +00:00
|
|
|
func (c *Corpus) mergeSignerKeyIdRow(k, v []byte) error {
|
|
|
|
br, ok := blob.ParseBytes(k[len("signerkeyid:"):])
|
2013-11-17 17:41:45 +00:00
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("bogus signerid row: %q -> %q", k, v)
|
|
|
|
}
|
2013-12-07 19:08:19 +00:00
|
|
|
c.keyId[br] = string(v)
|
2013-11-17 17:41:45 +00:00
|
|
|
return nil
|
2013-11-17 03:40:14 +00:00
|
|
|
}
|
|
|
|
|
2013-12-07 19:08:19 +00:00
|
|
|
// mergeClaimRow parses a claim index row, interns its strings, attaches
// the claim to its permanode, and cross-indexes it by claim value and
// by camliNodeType.
func (c *Corpus) mergeClaimRow(k, v []byte) error {
	// TODO: update kvClaim to take []byte instead of string
	cl, ok := kvClaim(string(k), string(v), c.blobParse)
	if !ok || !cl.Permanode.Valid() {
		return fmt.Errorf("bogus claim row: %q -> %q", k, v)
	}
	cl.Type = c.str(cl.Type)
	cl.Attr = c.str(cl.Attr)
	cl.Value = c.str(cl.Value) // less likely to intern, but some (tags) do

	pn := c.br(cl.Permanode)
	pm, ok := c.permanodes[pn]
	if !ok {
		pm = new(PermanodeMeta)
		c.permanodes[pn] = pm
	}
	pm.Claims = append(pm.Claims, &cl)
	if !c.building {
		// Unless we're still starting up (at which we sort at
		// the end instead), keep claims sorted and attrs in sync.
		if err := pm.fixupLastClaim(c.keyId); err != nil {
			return err
		}
	}

	// If the claim value is itself a blobref, remember the back-reference.
	if vbr, ok := blob.Parse(cl.Value); ok {
		c.claimBack[vbr] = append(c.claimBack[vbr], &cl)
	}
	if cl.Attr == "camliNodeType" {
		set := c.permanodesSetByNodeType[cl.Value]
		if set == nil {
			set = make(map[blob.Ref]bool)
			c.permanodesSetByNodeType[cl.Value] = set
		}
		set[pn] = true
	}
	return nil
}
|
|
|
|
|
2013-12-07 19:08:19 +00:00
|
|
|
// mergeFileInfoRow merges a "fileinfo|<fileref>" row into the corpus,
// recording the file's size, name, MIME type, and (when present) its
// wholeRef.
func (c *Corpus) mergeFileInfoRow(k, v []byte) error {
	// fileinfo|sha1-579f7f246bd420d486ddeb0dadbb256cfaf8bf6b" "5|some-stuff.txt|"
	pipe := bytes.IndexByte(k, '|')
	if pipe < 0 {
		return fmt.Errorf("unexpected fileinfo key %q", k)
	}
	br, ok := blob.ParseBytes(k[pipe+1:])
	if !ok {
		return fmt.Errorf("unexpected fileinfo blobref in key %q", k)
	}

	// TODO: could at least use strutil.ParseUintBytes to not stringify and retain
	// the length bytes of v.
	c.ss = strutil.AppendSplitN(c.ss[:0], string(v), "|", 4)
	// 3 fields is the legacy (pre-wholeRef) format; 4 is current.
	if len(c.ss) != 3 && len(c.ss) != 4 {
		return fmt.Errorf("unexpected fileinfo value %q", v)
	}
	size, err := strconv.ParseInt(c.ss[0], 10, 64)
	if err != nil {
		return fmt.Errorf("unexpected fileinfo value %q", v)
	}
	var wholeRef blob.Ref
	if len(c.ss) == 4 && c.ss[3] != "" { // checking for "" because of special files such as symlinks.
		var ok bool
		wholeRef, ok = blob.Parse(urld(c.ss[3]))
		if !ok {
			return fmt.Errorf("invalid wholeRef blobref in value %q for fileinfo key %q", v, k)
		}
	}
	c.mutateFileInfo(br, func(fi *camtypes.FileInfo) {
		fi.Size = size
		fi.FileName = c.str(urld(c.ss[1]))
		fi.MIMEType = c.str(urld(c.ss[2]))
		fi.WholeRef = wholeRef
	})
	return nil
}
|
|
|
|
|
2017-04-11 13:43:19 +00:00
|
|
|
func (c *Corpus) mergeStaticDirChildRow(k, v []byte) error {
|
|
|
|
// dirchild|sha1-dir|sha1-child" "1"
|
|
|
|
// strip the key name
|
|
|
|
sk := k[len(keyStaticDirChild.name)+1:]
|
|
|
|
pipe := bytes.IndexByte(sk, '|')
|
|
|
|
if pipe < 0 {
|
|
|
|
return fmt.Errorf("invalid dirchild key %q, missing second pipe", k)
|
|
|
|
}
|
|
|
|
parent, ok := blob.ParseBytes(sk[:pipe])
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid dirchild parent blobref in key %q", k)
|
|
|
|
}
|
|
|
|
child, ok := blob.ParseBytes(sk[pipe+1:])
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid dirchild child blobref in key %q", k)
|
|
|
|
}
|
|
|
|
parent = c.br(parent)
|
|
|
|
child = c.br(child)
|
|
|
|
children, ok := c.dirChildren[parent]
|
|
|
|
if !ok {
|
|
|
|
children = make(map[blob.Ref]struct{})
|
|
|
|
}
|
|
|
|
children[child] = struct{}{}
|
|
|
|
c.dirChildren[parent] = children
|
|
|
|
parents, ok := c.fileParents[child]
|
|
|
|
if !ok {
|
|
|
|
parents = make(map[blob.Ref]struct{})
|
|
|
|
}
|
|
|
|
parents[parent] = struct{}{}
|
|
|
|
c.fileParents[child] = parents
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-12-07 19:08:19 +00:00
|
|
|
func (c *Corpus) mergeFileTimesRow(k, v []byte) error {
|
|
|
|
if len(v) == 0 {
|
2013-11-18 03:49:19 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
// "filetimes|sha1-579f7f246bd420d486ddeb0dadbb256cfaf8bf6b" "1970-01-01T00%3A02%3A03Z"
|
2013-12-07 19:08:19 +00:00
|
|
|
pipe := bytes.IndexByte(k, '|')
|
|
|
|
if pipe < 0 {
|
|
|
|
return fmt.Errorf("unexpected fileinfo key %q", k)
|
2013-11-18 03:49:19 +00:00
|
|
|
}
|
2013-12-07 19:08:19 +00:00
|
|
|
br, ok := blob.ParseBytes(k[pipe+1:])
|
2013-11-18 03:49:19 +00:00
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("unexpected filetimes blobref in key %q", k)
|
|
|
|
}
|
2014-04-30 00:07:32 +00:00
|
|
|
c.ss = strutil.AppendSplitN(c.ss[:0], urld(string(v)), ",", -1)
|
2013-11-18 03:49:19 +00:00
|
|
|
times := c.ss
|
|
|
|
c.mutateFileInfo(br, func(fi *camtypes.FileInfo) {
|
|
|
|
updateFileInfoTimes(fi, times)
|
|
|
|
})
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Corpus) mutateFileInfo(br blob.Ref, fn func(*camtypes.FileInfo)) {
|
2013-11-29 18:47:42 +00:00
|
|
|
br = c.br(br)
|
2013-11-18 03:49:19 +00:00
|
|
|
fi := c.files[br] // use zero value if not present
|
|
|
|
fn(&fi)
|
|
|
|
c.files[br] = fi
|
|
|
|
}
|
|
|
|
|
2013-12-07 19:08:19 +00:00
|
|
|
func (c *Corpus) mergeImageSizeRow(k, v []byte) error {
|
|
|
|
br, okk := blob.ParseBytes(k[len("imagesize|"):])
|
2013-11-26 03:46:28 +00:00
|
|
|
ii, okv := kvImageInfo(v)
|
|
|
|
if !okk || !okv {
|
|
|
|
return fmt.Errorf("bogus row %q = %q", k, v)
|
|
|
|
}
|
2013-11-29 18:47:42 +00:00
|
|
|
br = c.br(br)
|
2013-11-26 03:46:28 +00:00
|
|
|
c.imageInfo[br] = ii
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-01-31 00:49:43 +00:00
|
|
|
// sha1Prefix is used to detect legacy SHA-1 wholeRefs during rebuilds.
var sha1Prefix = []byte("sha1-")

// "wholetofile|sha1-17b53c7c3e664d3613dfdce50ef1f2a09e8f04b5|sha1-fb88f3eab3acfcf3cfc8cd77ae4366f6f975d227" -> "1"
func (c *Corpus) mergeWholeToFileRow(k, v []byte) error {
	pair := k[len("wholetofile|"):]
	pipe := bytes.IndexByte(pair, '|')
	if pipe < 0 {
		return fmt.Errorf("bogus row %q = %q", k, v)
	}
	wholeRef, ok1 := blob.ParseBytes(pair[:pipe])
	fileRef, ok2 := blob.ParseBytes(pair[pipe+1:])
	if !ok1 || !ok2 {
		return fmt.Errorf("bogus row %q = %q", k, v)
	}
	c.fileWholeRef[fileRef] = wholeRef
	// While rebuilding, note whether any wholeRef still uses legacy SHA-1.
	if c.building && !c.hasLegacySHA1 {
		if bytes.HasPrefix(pair, sha1Prefix) {
			c.hasLegacySHA1 = true
		}
	}
	return nil
}
|
|
|
|
|
2014-01-21 05:05:45 +00:00
|
|
|
// "mediatag|sha1-2b219be9d9691b4f8090e7ee2690098097f59566|album" = "Some+Album+Name"
|
|
|
|
func (c *Corpus) mergeMediaTag(k, v []byte) error {
|
2014-01-20 23:47:21 +00:00
|
|
|
f := strings.Split(string(k), "|")
|
2014-01-21 05:05:45 +00:00
|
|
|
if len(f) != 3 {
|
2014-01-20 23:47:21 +00:00
|
|
|
return fmt.Errorf("unexpected key %q", k)
|
|
|
|
}
|
2014-01-21 05:05:45 +00:00
|
|
|
wholeRef, ok := blob.Parse(f[1])
|
2014-01-20 23:47:21 +00:00
|
|
|
if !ok {
|
2014-01-21 05:05:45 +00:00
|
|
|
return fmt.Errorf("failed to parse wholeref from key %q", k)
|
2014-01-20 23:47:21 +00:00
|
|
|
}
|
2014-04-04 23:23:11 +00:00
|
|
|
tm, ok := c.mediaTags[wholeRef]
|
2014-01-20 23:47:21 +00:00
|
|
|
if !ok {
|
|
|
|
tm = make(map[string]string)
|
2014-04-04 23:23:11 +00:00
|
|
|
c.mediaTags[wholeRef] = tm
|
2014-01-20 23:47:21 +00:00
|
|
|
}
|
2014-01-21 05:05:45 +00:00
|
|
|
tm[c.str(f[2])] = c.str(urld(string(v)))
|
2014-01-20 23:47:21 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2013-12-24 00:21:19 +00:00
|
|
|
// "exifgps|sha1-17b53c7c3e664d3613dfdce50ef1f2a09e8f04b5" -> "-122.39897155555556|37.61952208333334"
func (c *Corpus) mergeEXIFGPSRow(k, v []byte) error {
	wholeRef, ok := blob.ParseBytes(k[len("exifgps|"):])
	pipe := bytes.IndexByte(v, '|')
	if pipe < 0 || !ok {
		return fmt.Errorf("bogus row %q = %q", k, v)
	}
	lat, err := strconv.ParseFloat(string(v[:pipe]), 64)
	long, err1 := strconv.ParseFloat(string(v[pipe+1:]), 64)
	if err != nil || err1 != nil {
		// Bad coordinates are logged and skipped, not fatal to indexing.
		if err != nil {
			log.Printf("index: bogus latitude in value of row %q = %q", k, v)
		} else {
			log.Printf("index: bogus longitude in value of row %q = %q", k, v)
		}
		return nil
	}
	c.gps[wholeRef] = latLong{lat, long}
	return nil
}
|
|
|
|
|
2013-11-30 20:04:04 +00:00
|
|
|
// This enables the blob.Parse fast path cache, which reduces CPU (via
// reduced GC from new garbage), but increases memory usage, even
// though it shouldn't. The GC should fully discard the brOfStr map
// (which we nil out at the end of parsing), but the Go GC doesn't
// seem to clear it all.
// TODO: investigate / file bugs.
const useBlobParseCache = false

// blobParse parses v as a blob.Ref, consulting the optional fast-path
// cache (brOfStr) first when useBlobParseCache is enabled.
func (c *Corpus) blobParse(v string) (br blob.Ref, ok bool) {
	if useBlobParseCache {
		br, ok = c.brOfStr[v]
		if ok {
			return
		}
	}
	return blob.Parse(v)
}
|
|
|
|
|
2013-11-18 00:52:51 +00:00
|
|
|
// str returns s, interned.
|
|
|
|
func (c *Corpus) str(s string) string {
|
2013-11-17 02:50:01 +00:00
|
|
|
if s == "" {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
if s, ok := c.strs[s]; ok {
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
if c.strs == nil {
|
|
|
|
c.strs = make(map[string]string)
|
|
|
|
}
|
|
|
|
c.strs[s] = s
|
|
|
|
return s
|
|
|
|
}
|
2013-11-17 17:41:45 +00:00
|
|
|
|
2013-11-29 18:47:42 +00:00
|
|
|
// br returns br, interned.
|
|
|
|
func (c *Corpus) br(br blob.Ref) blob.Ref {
|
|
|
|
if bm, ok := c.blobs[br]; ok {
|
|
|
|
c.brInterns++
|
|
|
|
return bm.Ref
|
|
|
|
}
|
|
|
|
return br
|
|
|
|
}
|
|
|
|
|
2013-11-17 17:41:45 +00:00
|
|
|
// *********** Reading from the corpus
|
|
|
|
|
2017-08-19 05:26:51 +00:00
|
|
|
// EnumerateCamliBlobs calls fn for all known meta blobs.
//
// If camType is not empty, it specifies a filter for which meta blob
// types to call fn for. If empty, all are emitted.
//
// If fn returns false, iteration ends.
func (c *Corpus) EnumerateCamliBlobs(camType schema.CamliType, fn func(camtypes.BlobMeta) bool) {
	if camType != "" {
		// Fast path: only one camliType bucket to walk.
		for _, bm := range c.camBlobs[camType] {
			if !fn(*bm) {
				return
			}
		}
		return
	}
	for _, m := range c.camBlobs {
		for _, bm := range m {
			if !fn(*bm) {
				return
			}
		}
	}
}
|
|
|
|
|
2017-08-19 05:26:51 +00:00
|
|
|
// EnumerateBlobMeta calls fn for all known meta blobs in an undefined
|
|
|
|
// order.
|
|
|
|
// If fn returns false, iteration ends.
|
|
|
|
func (c *Corpus) EnumerateBlobMeta(fn func(camtypes.BlobMeta) bool) {
|
2013-11-17 17:41:45 +00:00
|
|
|
for _, bm := range c.blobs {
|
2017-08-19 05:26:51 +00:00
|
|
|
if !fn(*bm) {
|
|
|
|
return
|
2013-12-03 04:01:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// pnAndTime is a value type wrapping a permanode blobref and its modtime.
// It's used by EnumeratePermanodesLastModified and EnumeratePermanodesCreated.
type pnAndTime struct {
	pn blob.Ref
	t  time.Time
}

// byPermanodeTime sorts by time, breaking ties by blobref for a
// deterministic total order.
type byPermanodeTime []pnAndTime

func (s byPermanodeTime) Len() int      { return len(s) }
func (s byPermanodeTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byPermanodeTime) Less(i, j int) bool {
	if s[i].t.Equal(s[j].t) {
		return s[i].pn.Less(s[j].pn)
	}
	return s[i].t.Before(s[j].t)
}
|
2013-12-03 04:01:37 +00:00
|
|
|
|
2014-06-11 16:37:08 +00:00
|
|
|
// lazySortedPermanodes lazily builds and caches, per Corpus generation,
// the list of permanodes sorted by pnTime, in both directions.
type lazySortedPermanodes struct {
	c      *Corpus
	pnTime func(blob.Ref) (time.Time, bool) // returns permanode's time (if any) to sort on

	mu                  sync.Mutex  // guards sortedCache and ofGen
	sortedCache         []pnAndTime // nil if invalidated
	sortedCacheReversed []pnAndTime // nil if invalidated
	ofGen               int64       // the Corpus.gen from which sortedCache was built
}
|
|
|
|
|
|
|
|
func reversedCopy(original []pnAndTime) []pnAndTime {
|
|
|
|
l := len(original)
|
|
|
|
reversed := make([]pnAndTime, l)
|
|
|
|
for k, v := range original {
|
|
|
|
reversed[l-1-k] = v
|
|
|
|
}
|
|
|
|
return reversed
|
|
|
|
}
|
|
|
|
|
|
|
|
// sorted returns all non-deleted permanodes that have a pnTime, sorted
// ascending by that time (descending when reverse is set). Results are
// cached per Corpus generation; one direction is derived from the
// other when possible to avoid a re-sort.
func (lsp *lazySortedPermanodes) sorted(reverse bool) []pnAndTime {
	lsp.mu.Lock()
	defer lsp.mu.Unlock()
	if lsp.ofGen == lsp.c.gen {
		// corpus hasn't changed -> caches are still valid, if they exist.
		if reverse {
			if lsp.sortedCacheReversed != nil {
				return lsp.sortedCacheReversed
			}
			if lsp.sortedCache != nil {
				// using sortedCache to quickly build sortedCacheReversed
				lsp.sortedCacheReversed = reversedCopy(lsp.sortedCache)
				return lsp.sortedCacheReversed
			}
		}
		if !reverse {
			if lsp.sortedCache != nil {
				return lsp.sortedCache
			}
			if lsp.sortedCacheReversed != nil {
				// using sortedCacheReversed to quickly build sortedCache
				lsp.sortedCache = reversedCopy(lsp.sortedCacheReversed)
				return lsp.sortedCache
			}
		}
	}
	// invalidate the caches
	lsp.sortedCache = nil
	lsp.sortedCacheReversed = nil
	pns := make([]pnAndTime, 0, len(lsp.c.permanodes))
	for pn := range lsp.c.permanodes {
		if lsp.c.IsDeleted(pn) {
			continue
		}
		if pt, ok := lsp.pnTime(pn); ok {
			pns = append(pns, pnAndTime{pn, pt})
		}
	}
	// and rebuild one of them
	if reverse {
		sort.Sort(sort.Reverse(byPermanodeTime(pns)))
		lsp.sortedCacheReversed = pns
	} else {
		sort.Sort(byPermanodeTime(pns))
		lsp.sortedCache = pns
	}
	lsp.ofGen = lsp.c.gen
	return pns
}
|
|
|
|
|
2017-08-19 05:26:51 +00:00
|
|
|
func (c *Corpus) enumeratePermanodes(fn func(camtypes.BlobMeta) bool, pns []pnAndTime) {
|
2013-12-03 04:01:37 +00:00
|
|
|
for _, cand := range pns {
|
|
|
|
bm := c.blobs[cand.pn]
|
|
|
|
if bm == nil {
|
|
|
|
continue
|
|
|
|
}
|
2017-08-19 05:26:51 +00:00
|
|
|
if !fn(*bm) {
|
|
|
|
return
|
2013-12-03 04:01:37 +00:00
|
|
|
}
|
2013-11-17 17:41:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-19 05:26:51 +00:00
|
|
|
// EnumeratePermanodesLastModified calls fn for all permanodes, sorted by most recently modified first.
// Iteration ends prematurely if fn returns false.
func (c *Corpus) EnumeratePermanodesLastModified(fn func(camtypes.BlobMeta) bool) {
	c.enumeratePermanodes(fn, c.permanodesByModtime.sorted(true))
}
|
|
|
|
|
2017-08-19 05:26:51 +00:00
|
|
|
// EnumeratePermanodesCreated calls fn for all permanodes.
// They are sorted using the contents creation date if any, the permanode modtime
// otherwise, and in the order specified by newestFirst.
// Iteration ends prematurely if fn returns false.
func (c *Corpus) EnumeratePermanodesCreated(fn func(camtypes.BlobMeta) bool, newestFirst bool) {
	c.enumeratePermanodes(fn, c.permanodesByTime.sorted(newestFirst))
}
|
|
|
|
|
2018-04-26 03:31:36 +00:00
|
|
|
// EnumerateSingleBlob calls fn with br's BlobMeta if br exists in the corpus.
|
|
|
|
func (c *Corpus) EnumerateSingleBlob(fn func(camtypes.BlobMeta) bool, br blob.Ref) {
|
|
|
|
if bm := c.blobs[br]; bm != nil {
|
|
|
|
fn(*bm)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-26 02:10:00 +00:00
|
|
|
// EnumeratePermanodesByNodeTypes enumerates over all permanodes that might
|
|
|
|
// have one of the provided camliNodeType values, calling fn for each. If fn returns false,
|
|
|
|
// enumeration ends.
|
|
|
|
func (c *Corpus) EnumeratePermanodesByNodeTypes(fn func(camtypes.BlobMeta) bool, camliNodeTypes []string) {
|
|
|
|
for _, t := range camliNodeTypes {
|
|
|
|
set := c.permanodesSetByNodeType[t]
|
|
|
|
for br := range set {
|
|
|
|
if bm := c.blobs[br]; bm != nil {
|
|
|
|
if !fn(*bm) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) GetBlobMeta(ctx context.Context, br blob.Ref) (camtypes.BlobMeta, error) {
|
2013-11-17 17:41:45 +00:00
|
|
|
bm, ok := c.blobs[br]
|
|
|
|
if !ok {
|
|
|
|
return camtypes.BlobMeta{}, os.ErrNotExist
|
|
|
|
}
|
2013-11-17 18:52:37 +00:00
|
|
|
return *bm, nil
|
2013-11-17 17:41:45 +00:00
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) KeyId(ctx context.Context, signer blob.Ref) (string, error) {
|
2013-11-17 17:41:45 +00:00
|
|
|
if v, ok := c.keyId[signer]; ok {
|
|
|
|
return v, nil
|
|
|
|
}
|
2013-11-23 07:24:54 +00:00
|
|
|
return "", sorted.ErrNotFound
|
2013-11-17 17:41:45 +00:00
|
|
|
}
|
2013-11-18 00:52:51 +00:00
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) pnTimeAttr(pn blob.Ref, attr string) (t time.Time, ok bool) {
|
2018-03-02 17:29:52 +00:00
|
|
|
if v := c.PermanodeAttrValue(pn, attr, time.Time{}, ""); v != "" {
|
2014-07-31 21:24:03 +00:00
|
|
|
if t, err := time.Parse(time.RFC3339, v); err == nil {
|
|
|
|
return t, true
|
|
|
|
}
|
2014-04-30 00:07:32 +00:00
|
|
|
}
|
2014-07-31 21:24:03 +00:00
|
|
|
return
|
2014-04-30 00:07:32 +00:00
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// PermanodeTime returns the time of the content in permanode.
func (c *Corpus) PermanodeTime(pn blob.Ref) (t time.Time, ok bool) {
	// TODO(bradfitz): keep this time property cached on the permanode / files
	// TODO(bradfitz): finish implementing all these

	// Priorities:
	// -- Permanode explicit "camliTime" property
	// -- EXIF GPS time
	// -- Exif camera time - this one is actually already in the FileInfo,
	// because we use schema.FileTime (which returns the EXIF time, if available)
	// to index the time when receiving a file.
	// -- File time
	// -- File modtime
	// -- camliContent claim set time

	if t, ok = c.pnTimeAttr(pn, nodeattr.PaymentDueDate); ok {
		return
	}
	if t, ok = c.pnTimeAttr(pn, nodeattr.StartDate); ok {
		return
	}
	if t, ok = c.pnTimeAttr(pn, nodeattr.DateCreated); ok {
		return
	}
	// Fall back to the camliContent file's own times, if any.
	var fi camtypes.FileInfo
	ccRef, ccTime, ok := c.pnCamliContent(pn)
	if ok {
		fi, _ = c.files[ccRef]
	}
	if fi.Time != nil {
		return time.Time(*fi.Time), true
	}

	if t, ok = c.pnTimeAttr(pn, nodeattr.DatePublished); ok {
		return
	}
	if t, ok = c.pnTimeAttr(pn, nodeattr.DateModified); ok {
		return
	}
	if fi.ModTime != nil {
		return time.Time(*fi.ModTime), true
	}
	// Last resort: the time the camliContent claim was set.
	if ok {
		return ccTime, true
	}
	return time.Time{}, false
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// PermanodeAnyTime returns the time that best qualifies the permanode.
|
2014-04-30 00:07:32 +00:00
|
|
|
// It tries content-specific times first, the permanode modtime otherwise.
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) PermanodeAnyTime(pn blob.Ref) (t time.Time, ok bool) {
|
|
|
|
if t, ok := c.PermanodeTime(pn); ok {
|
2014-04-30 00:07:32 +00:00
|
|
|
return t, ok
|
|
|
|
}
|
2016-04-22 04:34:24 +00:00
|
|
|
return c.PermanodeModtime(pn)
|
2014-04-30 00:07:32 +00:00
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// pnCamliContent returns the blobref set by pn's most recent
// camliContent claim (if still valid) and the date of that claim.
// A del-attribute claim clears any earlier value.
func (c *Corpus) pnCamliContent(pn blob.Ref) (cc blob.Ref, t time.Time, ok bool) {
	// TODO(bradfitz): keep this property cached
	pm, ok := c.permanodes[pn]
	if !ok {
		return
	}
	// Claims are replayed in order; the last set/del wins.
	for _, cl := range pm.Claims {
		if cl.Attr != "camliContent" {
			continue
		}
		// TODO: pass down the 'PermanodeConstraint.At' parameter, and then do: if cl.Date.After(at) { continue }
		switch cl.Type {
		case string(schema.DelAttributeClaim):
			cc = blob.Ref{}
			t = time.Time{}
		case string(schema.SetAttributeClaim):
			cc = blob.ParseOrZero(cl.Value)
			t = cl.Date
		}
	}
	return cc, t, cc.Valid()
}
|
|
|
|
|
2013-11-27 03:47:00 +00:00
|
|
|
// PermanodeModtime returns the latest modification time of the given
// permanode.
//
// The ok value is true only if the permanode is known and has any
// non-deleted claims. A deleted claim is ignored and neither its
// claim date nor the date of the delete claim affect the modtime of
// the permanode.
func (c *Corpus) PermanodeModtime(pn blob.Ref) (t time.Time, ok bool) {
	pm, ok := c.permanodes[pn]
	if !ok {
		return
	}

	// Note: We intentionally don't try to derive any information
	// (except the owner, elsewhere) from the permanode blob
	// itself. Even though the permanode blob sometimes has the
	// GPG signature time, we intentionally ignore it.
	for _, cl := range pm.Claims {
		if c.IsDeleted(cl.BlobRef) {
			continue
		}
		if cl.Date.After(t) {
			t = cl.Date
		}
	}
	return t, !t.IsZero()
}
|
2013-11-27 03:47:00 +00:00
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// PermanodeAttrValue returns a single-valued attribute or "".
// signerFilter, if set, should be the GPG ID of a signer
// (e.g. 2931A67C26F5ABDA).
func (c *Corpus) PermanodeAttrValue(permaNode blob.Ref,
	attr string,
	at time.Time,
	signerFilter string) string {
	pm, ok := c.permanodes[permaNode]
	if !ok {
		return ""
	}
	// Resolve the GPG ID filter into the set of signer blobrefs.
	var signerRefs SignerRefSet
	if signerFilter != "" {
		signerRefs, ok = c.signerRefs[signerFilter]
		if !ok {
			return ""
		}
	}

	// Fast path: a cached attribute map valid for (at, signerFilter).
	if values, ok := pm.valuesAtSigner(at, signerFilter); ok {
		v := values[attr]
		if len(v) == 0 {
			return ""
		}
		return v[0]
	}

	// Slow path: replay the claims.
	return claimPtrsAttrValue(pm.Claims, attr, at, signerRefs)
}
|
|
|
|
|
2016-05-12 04:05:43 +00:00
|
|
|
// permanodeAttrsOrClaims returns the best available source
// to query attr values of permaNode at the given time
// for the signerID, which is either:
// a. m that represents attr values for the parameters, or
// b. all claims of the permanode.
// Only one of m or claims will be non-nil.
//
// (m, nil) is returned if m represents attrValues
// valid for the specified parameters.
//
// (nil, claims) is returned if
// no cached attribute map is valid for the given time,
// because e.g. some claims are more recent than this time. In which
// case the caller should resort to query claims directly.
//
// (nil, nil) is returned if the permaNode does not exist,
// or permaNode exists and signerID is valid,
// but permaNode has no attributes for it.
//
// The returned values must not be changed by the caller.
func (c *Corpus) permanodeAttrsOrClaims(permaNode blob.Ref,
	at time.Time, signerID string) (m map[string][]string, claims []*camtypes.Claim) {

	pm, ok := c.permanodes[permaNode]
	if !ok {
		return nil, nil
	}

	m, ok = pm.valuesAtSigner(at, signerID)
	if ok {
		return m, nil
	}
	return nil, pm.Claims
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// AppendPermanodeAttrValues appends to dst all the values for the attribute
// attr set on permaNode.
// signerFilter, if set, should be the GPG ID of a signer (e.g. 2931A67C26F5ABDA).
// dst must start with length 0 (laziness, mostly)
func (c *Corpus) AppendPermanodeAttrValues(dst []string,
	permaNode blob.Ref,
	attr string,
	at time.Time,
	signerFilter string) []string {
	if len(dst) > 0 {
		panic("len(dst) must be 0")
	}
	pm, ok := c.permanodes[permaNode]
	if !ok {
		return dst
	}
	// Resolve the GPG ID filter into the set of signer blobrefs.
	var signerRefs SignerRefSet
	if signerFilter != "" {
		signerRefs, ok = c.signerRefs[signerFilter]
		if !ok {
			return dst
		}
	}
	// Fast path: a cached attribute map valid for (at, signerFilter).
	if values, ok := pm.valuesAtSigner(at, signerFilter); ok {
		return append(dst, values[attr]...)
	}
	if at.IsZero() {
		at = time.Now()
	}
	// Slow path: replay set/add/del claims up to time 'at'.
	for _, cl := range pm.Claims {
		if cl.Attr != attr || cl.Date.After(at) {
			continue
		}
		if len(signerRefs) > 0 && !signerRefs.blobMatches(cl.Signer) {
			continue
		}
		switch cl.Type {
		case string(schema.DelAttributeClaim):
			if cl.Value == "" {
				dst = dst[:0] // delete all
			} else {
				// Remove every occurrence of cl.Value in place.
				for i := 0; i < len(dst); i++ {
					v := dst[i]
					if v == cl.Value {
						copy(dst[i:], dst[i+1:])
						dst = dst[:len(dst)-1]
						i--
					}
				}
			}
		case string(schema.SetAttributeClaim):
			dst = append(dst[:0], cl.Value)
		case string(schema.AddAttributeClaim):
			dst = append(dst, cl.Value)
		}
	}
	return dst
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) AppendClaims(ctx context.Context, dst []camtypes.Claim, permaNode blob.Ref,
|
2018-01-31 18:30:47 +00:00
|
|
|
signerFilter string,
|
2013-11-18 00:52:51 +00:00
|
|
|
attrFilter string) ([]camtypes.Claim, error) {
|
|
|
|
pm, ok := c.permanodes[permaNode]
|
|
|
|
if !ok {
|
|
|
|
return nil, nil
|
|
|
|
}
|
2018-01-24 01:19:49 +00:00
|
|
|
|
2018-01-31 18:30:47 +00:00
|
|
|
var signerRefs SignerRefSet
|
|
|
|
if signerFilter != "" {
|
|
|
|
signerRefs, ok = c.signerRefs[signerFilter]
|
2018-01-24 01:19:49 +00:00
|
|
|
if !ok {
|
|
|
|
return dst, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-18 00:52:51 +00:00
|
|
|
for _, cl := range pm.Claims {
|
2016-04-22 04:34:24 +00:00
|
|
|
if c.IsDeleted(cl.BlobRef) {
|
2013-11-18 00:52:51 +00:00
|
|
|
continue
|
|
|
|
}
|
2018-01-24 01:19:49 +00:00
|
|
|
|
|
|
|
if len(signerRefs) > 0 && !signerRefs.blobMatches(cl.Signer) {
|
2013-11-18 00:52:51 +00:00
|
|
|
continue
|
|
|
|
}
|
2018-01-24 01:19:49 +00:00
|
|
|
|
2013-11-18 00:52:51 +00:00
|
|
|
if attrFilter != "" && cl.Attr != attrFilter {
|
|
|
|
continue
|
|
|
|
}
|
2013-12-09 13:15:34 +00:00
|
|
|
dst = append(dst, *cl)
|
2013-11-18 00:52:51 +00:00
|
|
|
}
|
|
|
|
return dst, nil
|
|
|
|
}
|
2013-11-18 03:49:19 +00:00
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) GetFileInfo(ctx context.Context, fileRef blob.Ref) (fi camtypes.FileInfo, err error) {
|
2013-11-18 03:49:19 +00:00
|
|
|
fi, ok := c.files[fileRef]
|
|
|
|
if !ok {
|
|
|
|
err = os.ErrNotExist
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2013-11-26 03:46:28 +00:00
|
|
|
|
2017-04-11 13:43:19 +00:00
|
|
|
// GetDirChildren returns the direct children (static-set entries) of the directory dirRef.
|
2018-05-08 15:35:43 +00:00
|
|
|
// It only returns an error if dirRef does not exist.
|
2017-04-11 13:43:19 +00:00
|
|
|
func (c *Corpus) GetDirChildren(ctx context.Context, dirRef blob.Ref) (map[blob.Ref]struct{}, error) {
|
|
|
|
children, ok := c.dirChildren[dirRef]
|
|
|
|
if !ok {
|
2018-05-08 15:35:43 +00:00
|
|
|
if _, ok := c.files[dirRef]; !ok {
|
|
|
|
return nil, os.ErrNotExist
|
|
|
|
}
|
|
|
|
return nil, nil
|
2017-04-11 13:43:19 +00:00
|
|
|
}
|
|
|
|
return children, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetParentDirs returns the direct parents (directories) of the file or directory childRef.
|
2018-05-08 15:35:43 +00:00
|
|
|
// It only returns an error if childRef does not exist.
|
2017-04-11 13:43:19 +00:00
|
|
|
func (c *Corpus) GetParentDirs(ctx context.Context, childRef blob.Ref) (map[blob.Ref]struct{}, error) {
|
|
|
|
parents, ok := c.fileParents[childRef]
|
|
|
|
if !ok {
|
2018-05-08 15:35:43 +00:00
|
|
|
if _, ok := c.files[childRef]; !ok {
|
|
|
|
return nil, os.ErrNotExist
|
|
|
|
}
|
|
|
|
return nil, nil
|
2017-04-11 13:43:19 +00:00
|
|
|
}
|
|
|
|
return parents, nil
|
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) GetImageInfo(ctx context.Context, fileRef blob.Ref) (ii camtypes.ImageInfo, err error) {
|
2013-11-26 03:46:28 +00:00
|
|
|
ii, ok := c.imageInfo[fileRef]
|
|
|
|
if !ok {
|
|
|
|
err = os.ErrNotExist
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2013-12-09 07:26:33 +00:00
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) GetMediaTags(ctx context.Context, fileRef blob.Ref) (map[string]string, error) {
|
2014-01-20 23:47:21 +00:00
|
|
|
wholeRef, ok := c.fileWholeRef[fileRef]
|
|
|
|
if !ok {
|
2014-04-04 23:23:11 +00:00
|
|
|
return nil, os.ErrNotExist
|
|
|
|
}
|
|
|
|
tags, ok := c.mediaTags[wholeRef]
|
|
|
|
if !ok {
|
|
|
|
return nil, os.ErrNotExist
|
2014-01-20 23:47:21 +00:00
|
|
|
}
|
2014-04-04 23:23:11 +00:00
|
|
|
return tags, nil
|
2014-01-20 23:47:21 +00:00
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) GetWholeRef(ctx context.Context, fileRef blob.Ref) (wholeRef blob.Ref, ok bool) {
|
2014-07-29 21:37:43 +00:00
|
|
|
wholeRef, ok = c.fileWholeRef[fileRef]
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) FileLatLong(fileRef blob.Ref) (lat, long float64, ok bool) {
|
2013-12-24 00:21:19 +00:00
|
|
|
wholeRef, ok := c.fileWholeRef[fileRef]
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
ll, ok := c.gps[wholeRef]
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
return ll.lat, ll.long, true
|
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// ForeachClaim calls fn for each claim of permaNode.
|
2016-02-02 18:12:45 +00:00
|
|
|
// If at is zero, all claims are yielded.
|
|
|
|
// If at is non-zero, claims after that point are skipped.
|
|
|
|
// If fn returns false, iteration ends.
|
|
|
|
// Iteration is in an undefined order.
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) ForeachClaim(permaNode blob.Ref, at time.Time, fn func(*camtypes.Claim) bool) {
|
2016-02-02 18:12:45 +00:00
|
|
|
pm, ok := c.permanodes[permaNode]
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
for _, cl := range pm.Claims {
|
|
|
|
if !at.IsZero() && cl.Date.After(at) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if !fn(cl) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// ForeachClaimBack calls fn for each claim with a value referencing br.
|
2014-03-14 01:47:11 +00:00
|
|
|
// If at is zero, all claims are yielded.
|
|
|
|
// If at is non-zero, claims after that point are skipped.
|
|
|
|
// If fn returns false, iteration ends.
|
|
|
|
// Iteration is in an undefined order.
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) ForeachClaimBack(value blob.Ref, at time.Time, fn func(*camtypes.Claim) bool) {
|
2014-03-14 01:47:11 +00:00
|
|
|
for _, cl := range c.claimBack[value] {
|
|
|
|
if !at.IsZero() && cl.Date.After(at) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if !fn(cl) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-22 04:34:24 +00:00
|
|
|
// PermanodeHasAttrValue reports whether the permanode pn at
|
2014-03-14 01:47:41 +00:00
|
|
|
// time at (zero means now) has the given attribute with the given
|
|
|
|
// value. If the attribute is multi-valued, any may match.
|
2016-04-22 04:34:24 +00:00
|
|
|
func (c *Corpus) PermanodeHasAttrValue(pn blob.Ref, at time.Time, attr, val string) bool {
|
2014-03-14 01:47:41 +00:00
|
|
|
pm, ok := c.permanodes[pn]
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
2018-01-17 18:02:03 +00:00
|
|
|
if values, ok := pm.valuesAtSigner(at, ""); ok {
|
2016-05-12 04:05:43 +00:00
|
|
|
for _, v := range values[attr] {
|
2016-01-27 16:09:18 +00:00
|
|
|
if v == val {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
2014-03-14 01:47:41 +00:00
|
|
|
if at.IsZero() {
|
|
|
|
at = time.Now()
|
|
|
|
}
|
|
|
|
ret := false
|
|
|
|
for _, cl := range pm.Claims {
|
|
|
|
if cl.Attr != attr {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if cl.Date.After(at) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
switch cl.Type {
|
|
|
|
case string(schema.DelAttributeClaim):
|
|
|
|
if cl.Value == "" || cl.Value == val {
|
|
|
|
ret = false
|
|
|
|
}
|
|
|
|
case string(schema.SetAttributeClaim):
|
|
|
|
ret = (cl.Value == val)
|
|
|
|
case string(schema.AddAttributeClaim):
|
|
|
|
if cl.Value == val {
|
2016-01-27 16:09:18 +00:00
|
|
|
ret = true
|
2014-03-14 01:47:41 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
2013-12-09 07:26:33 +00:00
|
|
|
// SetVerboseCorpusLogging controls corpus setup verbosity. It's on by default
// but used to disable verbose logging in tests.
// It simply overwrites the package-level logCorpusStats flag; it is not
// synchronized, so callers should set it before corpus setup begins.
func SetVerboseCorpusLogging(v bool) {
	logCorpusStats = v
}
|