mirror of https://github.com/perkeep/perkeep.git
pkg/index: write relevant keys when receiving a delete claim
This change:
1) checks whether the incoming claim is a delete claim, using GetBlobMeta to classify the claim's target;
2) writes the keyDeleted and keyDeletes keys when it is a delete claim, plus the usual keys when the target is a permanode.

Yet to be done in later CLs:
1) update the index deletes cache upon reception of a delete claim;
2) update most of the search functions so they use deletedAt properly;
3) add the new keys necessary for GetRecentPermanodes to give a fully correct result.

I also made indextest.DumpIndex public, because it turned out to be useful for debugging within pkg/search/ as well.

http://camlistore.org/issue/191

Change-Id: I8d8b9d12a535b8b1de0018b4a0e359241f14d52a
parent d0916a6802
commit c81f3147f6
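For context: a delete claim is an ordinary signed JSON claim blob whose claimType is "delete" and whose target names the blob being deleted. Schematically, with an illustrative date and an all-zero placeholder target (a real claim also carries the camliSig block that jsonsign appends on signing):

	{"camliVersion": 1,
	 "camliType": "claim",
	 "camliSigner": "sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007",
	 "claimDate": "2013-02-19T16:22:01Z",
	 "claimType": "delete",
	 "target": "sha1-0000000000000000000000000000000000000000"}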
@@ -74,7 +74,7 @@ func (id *IndexDeps) Set(key, value string) error {
 	return id.Index.Storage().Set(key, value)
 }
 
-func (id *IndexDeps) dumpIndex(t *testing.T) {
+func (id *IndexDeps) DumpIndex(t *testing.T) {
 	t.Logf("Begin index dump:")
 	it := id.Index.Storage().Find("")
 	for it.Next() {
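The rename to DumpIndex is what makes the helper usable from other packages' tests. A minimal sketch of the intended use from a pkg/search test (the test body and the NewMemoryIndex wiring are assumptions for illustration, not part of this change):

	// Hypothetical debugging aid in a pkg/search test:
	func TestSomeQuery(t *testing.T) {
		id := indextest.NewIndexDeps(index.NewMemoryIndex())
		id.Fataler = t
		defer id.DumpIndex(t) // exported by this change, so callable outside pkg/index
		pn := id.NewPermanode()
		id.SetAttribute(pn, "title", "a permanode under test")
		// ... run searches against id.Index and read the dumped rows on failure.
	}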
@@ -341,7 +341,7 @@ func Index(t *testing.T, initIdx func() *index.Index) {
 	)
 
 	lastPermanodeMutation := id.lastTime()
-	id.dumpIndex(t)
+	id.DumpIndex(t)
 
 	key := "signerkeyid:sha1-ad87ca5c78bd0ce1195c46f7c98e6025abbaf007"
 	if g, e := id.Get(key), "2931A67C26F5ABDA"; g != e {
@@ -610,7 +610,7 @@ func PathsOfSignerTarget(t *testing.T, initIdx func() *index.Index) {
 	claim2 := id.SetAttribute(pn, "camliPath:with|pipe", "targ-124")
 	t.Logf("made path claims %q and %q", claim1, claim2)
 
-	id.dumpIndex(t)
+	id.DumpIndex(t)
 
 	type test struct {
 		blobref string
@@ -657,7 +657,7 @@ func Files(t *testing.T, initIdx func() *index.Index) {
 	fileTime := time.Unix(1361250375, 0)
 	fileRef, wholeRef := id.UploadFile("foo.html", "<html>I am an html file.</html>", fileTime)
 	t.Logf("uploaded fileref %q, wholeRef %q", fileRef, wholeRef)
-	id.dumpIndex(t)
+	id.DumpIndex(t)
 
 	// ExistingFileSchemas
 	{
@@ -714,7 +714,7 @@ func EdgesTo(t *testing.T, initIdx func() *index.Index) {
 
 	t.Logf("edge %s --> %s", pn1, pn2)
 
-	id.dumpIndex(t)
+	id.DumpIndex(t)
 
 	// Look for pn1
 	{
@@ -740,7 +740,7 @@ func IsDeleted(t *testing.T, initIdx func() *index.Index) {
 	idx := initIdx()
 	id := NewIndexDeps(idx)
 	id.Fataler = t
-	defer id.dumpIndex(t)
+	defer id.DumpIndex(t)
 	pn1 := id.NewPermanode()
 
 	// delete pn1
@@ -790,7 +790,7 @@ func DeletedAt(t *testing.T, initIdx func() *index.Index) {
 	idx := initIdx()
 	id := NewIndexDeps(idx)
 	id.Fataler = t
-	defer id.dumpIndex(t)
+	defer id.DumpIndex(t)
 	pn1 := id.NewPermanode()
 
 	// Test the never, ever, deleted case
@@ -24,7 +24,7 @@ import (
 
 // requiredSchemaVersion is incremented every time
 // an index key type is added, changed, or removed.
-const requiredSchemaVersion = 2
+const requiredSchemaVersion = 3
 
 // type of key returns the identifier in k before the first ":" or "|".
 // (Originally we packed keys by hand and there are a mix of styles)
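Bumping requiredSchemaVersion from 2 to 3 invalidates existing indexes, since rows for the new keyDeleted and keyDeletes types won't exist until a full reindex. A sketch of the kind of guard this constant feeds, with the accessor name assumed rather than quoted from the tree:

	// Sketch: on startup, a stored version that doesn't match the
	// compiled-in requirement forces a wipe-and-reindex.
	if v := ix.schemaVersion(); v != requiredSchemaVersion {
		return fmt.Errorf("index schema version is %d; required version is %d (reindexing needed)",
			v, requiredSchemaVersion)
	}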
@@ -26,6 +26,7 @@ import (
 	_ "image/png"
 	"io"
 	"log"
+	"os"
 	"sort"
 	"strings"
 	"sync"
@@ -126,6 +127,8 @@ func (ix *Index) commit(mm mutationMap) error {
 // the blobref can be trusted at this point (it's been fully consumed
 // and verified to match), and the sniffer has been populated.
 func (ix *Index) populateMutationMap(br blob.Ref, sniffer *BlobSniffer) (mutationMap, error) {
+	// TODO(mpl): shouldn't we remove these two from the map (so they don't get committed) when
+	// e.g. in populateClaim we detect a bogus claim (which does not yield an error)?
 	mm := mutationMap{
 		"have:" + br.String(): fmt.Sprintf("%d", sniffer.Size()),
 		"meta:" + br.String(): fmt.Sprintf("%d|%s", sniffer.Size(), sniffer.MIMEType()),
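populateMutationMap gathers every row a blob should contribute before anything is written, so a bogus claim could in principle be dropped wholesale, which is what the new TODO is about. As a mental model, mutationMap behaves like a plain map of index rows committed as one batch (simplified sketch; the real declaration lives alongside this code):

	// Simplified model of mutationMap:
	type mutationMap map[string]string

	// Set records one index row (key -> value) for the upcoming commit.
	func (mm mutationMap) Set(key, value string) {
		mm[key] = value
	}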
@@ -137,10 +140,6 @@ func (ix *Index) populateMutationMap(br blob.Ref, sniffer *BlobSniffer) (mutationMap, error) {
 		if err := ix.populateClaim(blob, mm); err != nil {
 			return nil, err
 		}
-	case "permanode":
-		//if err := mi.populatePermanode(blobRef, camli, mm); err != nil {
-		//return nil, err
-		//}
 	case "file":
 		if err := ix.populateFile(blob, mm); err != nil {
 			return nil, err
@@ -311,6 +310,41 @@ func (ix *Index) populateDir(b *schema.Blob, mm mutationMap) error {
 	return nil
 }
 
+// populateDeleteClaim adds to mm the entries resulting from the delete claim cl.
+// It is assumed cl is a valid claim, and vr has already been verified.
+func (ix *Index) populateDeleteClaim(cl schema.Claim, vr *jsonsign.VerifyRequest, mm mutationMap) {
+	br := cl.Blob().BlobRef()
+	target := cl.Target()
+	if !target.Valid() {
+		log.Print(fmt.Errorf("no valid target for delete claim %v", br))
+		return
+	}
+	meta, err := ix.GetBlobMeta(target)
+	if err != nil {
+		if err == os.ErrNotExist {
+			// TODO: return a dependency error type, to schedule re-indexing in the future
+		}
+		log.Print(fmt.Errorf("Could not get mime type of target blob %v: %v", target, err))
+		return
+	}
+	// TODO(mpl): create consts somewhere for "claim" and "permanode" as camliTypes, and use them,
+	// instead of hardcoding. Unless they already exist? (didn't find them.)
+	if meta.CamliType != "permanode" && meta.CamliType != "claim" {
+		log.Print(fmt.Errorf("delete claim target in %v is neither a permanode nor a claim: %v", br, meta.CamliType))
+		return
+	}
+	mm.Set(keyDeleted.Key(target, cl.ClaimDateString(), br), "")
+	mm.Set(keyDeletes.Key(br, target), "")
+	if meta.CamliType == "claim" {
+		return
+	}
+	recentKey := keyRecentPermanode.Key(vr.SignerKeyId, cl.ClaimDateString(), br)
+	mm.Set(recentKey, target.String())
+	attr, value := cl.Attribute(), cl.Value()
+	claimKey := keyPermanodeClaim.Key(target, vr.SignerKeyId, cl.ClaimDateString(), br)
+	mm.Set(claimKey, keyPermanodeClaim.Val(cl.ClaimType(), attr, value, vr.CamliSigner))
+}
+
 func (ix *Index) populateClaim(b *schema.Blob, mm mutationMap) error {
 	br := b.BlobRef()
 
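Concretely, for a delete claim <claimref> signed by <keyid> and targeting permanode <pn> with claim date <d>, the function above adds rows shaped roughly like the following (assuming the row prefixes mirror the key variable names; the exact encoding, including any date reversal, is handled by the keyType definitions):

	deleted|<pn>|<d>|<claimref>       = ""
	deletes|<claimref>|<pn>           = ""
	recpn|<keyid>|<d>|<claimref>      = <pn>
	claim|<pn>|<keyid>|<d>|<claimref> = delete|<attr>|<value>|<signer>

When the target is itself a claim rather than a permanode, only the first two rows are written, as the early return shows.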
@@ -320,13 +354,6 @@ func (ix *Index) populateClaim(b *schema.Blob, mm mutationMap) error {
 		return nil
 	}
 
-	pnbr := claim.ModifiedPermanode()
-	if !pnbr.Valid() {
-		// A different type of claim; not modifying a permanode.
-		return nil
-	}
-	attr, value := claim.Attribute(), claim.Value()
-
 	vr := jsonsign.NewVerificationRequest(b.JSON(), ix.KeyFetcher)
 	if !vr.Verify() {
 		// TODO(bradfitz): ask if the vr.Err.(jsonsign.Error).IsPermanent() and retry
@@ -337,12 +364,22 @@ func (ix *Index) populateClaim(b *schema.Blob, mm mutationMap) error {
 		return errors.New("index: populateClaim verification failure")
 	}
 	verifiedKeyId := vr.SignerKeyId
-
 	mm.Set("signerkeyid:"+vr.CamliSigner.String(), verifiedKeyId)
 
+	if claim.ClaimType() == string(schema.DeleteClaim) {
+		ix.populateDeleteClaim(claim, vr, mm)
+		return nil
+	}
+
+	pnbr := claim.ModifiedPermanode()
+	if !pnbr.Valid() {
+		// A different type of claim; not modifying a permanode.
+		return nil
+	}
+
+	attr, value := claim.Attribute(), claim.Value()
 	recentKey := keyRecentPermanode.Key(verifiedKeyId, claim.ClaimDateString(), br)
 	mm.Set(recentKey, pnbr.String())
 
 	claimKey := keyPermanodeClaim.Key(pnbr, verifiedKeyId, claim.ClaimDateString(), br)
 	mm.Set(claimKey, keyPermanodeClaim.Val(claim.ClaimType(), attr, value, vr.CamliSigner))
-
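Taken together with the IsDeleted and DeletedAt tests above, the end-to-end flow this enables looks roughly like the following, where the Delete helper and the index's IsDeleted accessor are assumed from those tests rather than shown in this diff:

	idx := index.NewMemoryIndex()
	id := indextest.NewIndexDeps(idx)
	pn := id.NewPermanode() // uploads a signed permanode
	id.Delete(pn)           // uploads a signed delete claim targeting pn
	// populateClaim now routes the delete claim through populateDeleteClaim,
	// so the keyDeleted/keyDeletes rows exist and the deletion is visible:
	if !idx.IsDeleted(pn) {
		// this would have failed before this change
	}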