mirror of https://github.com/perkeep/perkeep.git
share: refactored
- pkg/server/share.go handles share(d) blobs.
- pkg/blobserver/gethandler no longer deals with share blobs and has been rewritten like the other handlers (no more auth in there).
- client and signhandler changed accordingly.
- serverconfig: auth moved up in makeCamliHandler.
- share parameter added to the user level and low level config.
- share URL is now http(s)://host[:port]/[prefix/]share/sha1-xxxx.

Addresses http://camlistore.org/issue/107

Change-Id: I0a2c07ac1a1e435b141702e0ff06dc8182721d65
This commit is contained in:
parent 2384f368f6
commit f21cea5131
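For orientation, here is a sketch of how the new option maps from the user-level config to the generated low-level config, excerpted from the config changes in this commit (the host, port, and blob ref below are placeholders):

    user-level config (excerpt):
        "shareHandler": true

    generated low-level prefix (excerpt):
        "/share/": {
            "handler": "share",
            "handlerArgs": {
                "blobRoot": "/bs/"
            }
        }

A shared blob is then served at a URL of the form http(s)://host[:port]/[prefix/]share/sha1-xxxx, e.g. http://localhost:3179/share/sha1-xxxx.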
@@ -18,19 +18,14 @@ package gethandler

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "os"
    "regexp"
    "strings"
    "time"
    "unicode/utf8"

    "camlistore.org/pkg/auth"
    "camlistore.org/pkg/blobref"
    "camlistore.org/pkg/blobserver"
    "camlistore.org/pkg/httputil"

@@ -40,8 +35,7 @@ var kGetPattern = regexp.MustCompile(`/camli/` + blobref.Pattern + `$`)

// Handler is the HTTP handler for serving GET requests of blobs.
type Handler struct {
    Fetcher blobref.StreamingFetcher
    AllowGlobalAccess bool
    Fetcher blobref.StreamingFetcher
}

func CreateGetHandler(fetcher blobref.StreamingFetcher) func(http.ResponseWriter, *http.Request) {

@@ -56,9 +50,6 @@ func CreateGetHandler(fetcher blobref.StreamingFetcher) func(http.ResponseWriter
    }
}

const fetchFailureDelayNs = 200e6 // 200 ms
const maxJSONSize = 64 * 1024 // should be enough for everyone

func (h *Handler) ServeHTTP(conn http.ResponseWriter, req *http.Request) {
    blobRef := blobFromUrlPath(req.URL.Path)
    if blobRef == nil {

@@ -66,19 +57,11 @@ func (h *Handler) ServeHTTP(conn http.ResponseWriter, req *http.Request) {
        return
    }

    switch {
    case h.AllowGlobalAccess || auth.Allowed(req, auth.OpGet):
        serveBlobRef(conn, req, blobRef, h.Fetcher)
    case auth.TriedAuthorization(req):
        log.Printf("Attempted authorization failed on %s", req.URL)
        auth.SendUnauthorized(conn, req)
    default:
        handleGetViaSharing(conn, req, blobRef, h.Fetcher)
    }
    ServeBlobRef(conn, req, blobRef, h.Fetcher)
}

// serveBlobRef sends 'blobref' to 'conn' as directed by the Range header in 'req'
func serveBlobRef(rw http.ResponseWriter, req *http.Request, blobRef *blobref.BlobRef, fetcher blobref.StreamingFetcher) {
// ServeBlobRef serves a blob.
func ServeBlobRef(rw http.ResponseWriter, req *http.Request, blobRef *blobref.BlobRef, fetcher blobref.StreamingFetcher) {
    if w, ok := fetcher.(blobserver.ContextWrapper); ok {
        fetcher = w.WrapContext(req)
    }

@@ -128,109 +111,6 @@ func serveBlobRef(rw http.ResponseWriter, req *http.Request, blobRef *blobref.Bl
// This time is the first commit of the Camlistore project.
var dummyModTime = time.Unix(1276213335, 0)

// Unauthenticated user. Be paranoid.
func handleGetViaSharing(conn http.ResponseWriter, req *http.Request,
    blobRef *blobref.BlobRef, fetcher blobref.StreamingFetcher) {

    if w, ok := fetcher.(blobserver.ContextWrapper); ok {
        fetcher = w.WrapContext(req)
    }

    viaPathOkay := false
    startTime := time.Now()
    defer func() {
        if !viaPathOkay {
            // Insert a delay, to hide timing attacks probing
            // for the existence of blobs.
            sleep := fetchFailureDelayNs - (time.Now().Sub(startTime))
            if sleep > 0 {
                time.Sleep(sleep)
            }
        }
    }()
    viaBlobs := make([]*blobref.BlobRef, 0)
    if via := req.FormValue("via"); via != "" {
        for _, vs := range strings.Split(via, ",") {
            if br := blobref.Parse(vs); br == nil {
                httputil.BadRequestError(conn, "Malformed blobref in via param")
                return
            } else {
                viaBlobs = append(viaBlobs, br)
            }
        }
    }

    fetchChain := make([]*blobref.BlobRef, 0)
    fetchChain = append(fetchChain, viaBlobs...)
    fetchChain = append(fetchChain, blobRef)
    for i, br := range fetchChain {
        switch i {
        case 0:
            file, size, err := fetcher.FetchStreaming(br)
            if err != nil {
                log.Printf("Fetch chain 0 of %s failed: %v", br.String(), err)
                auth.SendUnauthorized(conn, req)
                return
            }
            defer file.Close()
            if size > maxJSONSize {
                log.Printf("Fetch chain 0 of %s too large", br.String())
                auth.SendUnauthorized(conn, req)
                return
            }
            jd := json.NewDecoder(file)
            m := make(map[string]interface{})
            if err := jd.Decode(&m); err != nil {
                log.Printf("Fetch chain 0 of %s wasn't JSON: %v", br.String(), err)
                auth.SendUnauthorized(conn, req)
                return
            }
            if m["camliType"].(string) != "share" {
                log.Printf("Fetch chain 0 of %s wasn't a share", br.String())
                auth.SendUnauthorized(conn, req)
                return
            }
            if len(fetchChain) > 1 && fetchChain[1].String() != m["target"].(string) {
                log.Printf("Fetch chain 0->1 (%s -> %q) unauthorized, expected hop to %q",
                    br.String(), fetchChain[1].String(), m["target"])
                auth.SendUnauthorized(conn, req)
                return
            }
        case len(fetchChain) - 1:
            // Last one is fine (as long as its path up to here has been proven, and it's
            // not the first thing in the chain)
            continue
        default:
            file, _, err := fetcher.FetchStreaming(br)
            if err != nil {
                log.Printf("Fetch chain %d of %s failed: %v", i, br.String(), err)
                auth.SendUnauthorized(conn, req)
                return
            }
            defer file.Close()
            lr := io.LimitReader(file, maxJSONSize)
            slurpBytes, err := ioutil.ReadAll(lr)
            if err != nil {
                log.Printf("Fetch chain %d of %s failed in slurp: %v", i, br.String(), err)
                auth.SendUnauthorized(conn, req)
                return
            }
            saught := fetchChain[i+1].String()
            if bytes.IndexAny(slurpBytes, saught) == -1 {
                log.Printf("Fetch chain %d of %s failed; no reference to %s",
                    i, br.String(), saught)
                auth.SendUnauthorized(conn, req)
                return
            }
        }
    }

    viaPathOkay = true

    serveBlobRef(conn, req, blobRef, fetcher)

}

func blobFromUrlPath(path string) *blobref.BlobRef {
    return blobref.FromPattern(kGetPattern, path)
}
@@ -47,9 +47,10 @@ type Client struct {
    // prefix is.
    server string

    prefixOnce sync.Once
    prefixErr error
    prefixv string // URL prefix before "/camli/"
    prefixOnce sync.Once // guards init of following 3 fields
    prefixErr error
    prefixv string // URL prefix before "/camli/"
    isSharePrefix bool // URL is a request for a share blob

    discoOnce sync.Once
    discoErr error

@@ -103,23 +104,25 @@ func NewOrFail() *Client {
    return c
}

var shareURLRx = regexp.MustCompile(`^(.+)/camli/(` + blobref.Pattern + ")")
var shareURLRx = regexp.MustCompile(`^(.+)/(` + blobref.Pattern + ")$")

func NewFromShareRoot(shareBlobURL string) (c *Client, target *blobref.BlobRef, err error) {
    var root string
    if m := shareURLRx.FindStringSubmatch(shareBlobURL); m == nil {
        return nil, nil, fmt.Errorf("Unkown URL base; doesn't contain /camli/")
    } else {
        c = New(m[1])
        c.discoOnce.Do(func() { /* nothing */
        })
        c.prefixOnce.Do(func() { /* nothing */
        })
        c.prefixv = m[1]
        c.authMode = auth.None{}
        c.via = make(map[string]string)
        root = m[2]
    m := shareURLRx.FindStringSubmatch(shareBlobURL)
    if m == nil {
        return nil, nil, fmt.Errorf("Unkown share URL base")
    }
    c = New(m[1])
    c.discoOnce.Do(func() { /* nothing */
    })
    c.prefixOnce.Do(func() { /* nothing */
    })
    c.prefixv = m[1]
    c.isSharePrefix = true
    c.authMode = auth.None{}
    c.via = make(map[string]string)
    root = m[2]

    res, err := http.Get(shareBlobURL)
    if err != nil {
        return nil, nil, fmt.Errorf("Error fetching %s: %v", shareBlobURL, err)

@@ -351,6 +354,9 @@ func (c *Client) FileHasContents(f, wholeRef *blobref.BlobRef) bool {
    return res.Header.Get("X-Camli-Contents") == wholeRef.String()
}

// prefix returns the URL prefix before "/camli/", or before
// the blobref hash in case of a share URL.
// Examples: http://foo.com:3179/bs or http://foo.com:3179/share
func (c *Client) prefix() (string, error) {
    c.prefixOnce.Do(func() { c.initPrefix() })
    if c.prefixErr != nil {

@@ -362,6 +368,19 @@ func (c *Client) prefix() (string, error) {
    return c.prefixv, nil
}

// blobPrefix returns the URL prefix before the blobref hash.
// Example: http://foo.com:3179/bs/camli or http://foo.com:3179/share
func (c *Client) blobPrefix() (string, error) {
    pfx, err := c.prefix()
    if err != nil {
        return "", err
    }
    if !c.isSharePrefix {
        pfx += "/camli"
    }
    return pfx, nil
}

func (c *Client) discoRoot() string {
    s := c.server
    if !strings.HasPrefix(s, "http") {

@@ -375,6 +394,7 @@ func (c *Client) discoRoot() string {
// component then it is directly used, otherwise the blobRoot
// from the discovery is used as the path.
func (c *Client) initPrefix() {
    c.isSharePrefix = false
    root := c.discoRoot()
    u, err := url.Parse(root)
    if err != nil {
@@ -40,7 +40,7 @@ var flagServer *string

func AddFlags() {
    defaultPath := ConfigFilePath()
    flagServer = flag.String("server", "", "Camlistore server prefix. If blank, the default from the \"server\" field of " + defaultPath + " is used. Acceptable forms: https://you.example.com, example.com:1345 (https assumed), or http://you.example.com/alt-root")
    flagServer = flag.String("server", "", "Camlistore server prefix. If blank, the default from the \"server\" field of "+defaultPath+" is used. Acceptable forms: https://you.example.com, example.com:1345 (https assumed), or http://you.example.com/alt-root")
}

// ExplicitServer returns the blobserver given in the flags, if any.
@@ -68,11 +68,11 @@ func (c *Client) viaPathTo(b *blobref.BlobRef) (path []*blobref.BlobRef) {
var blobsRx = regexp.MustCompile(blobref.Pattern)

func (c *Client) FetchVia(b *blobref.BlobRef, v []*blobref.BlobRef) (io.ReadCloser, int64, error) {
    pfx, err := c.prefix()
    pfx, err := c.blobPrefix()
    if err != nil {
        return nil, 0, err
    }
    url := fmt.Sprintf("%s/camli/%s", pfx, b)
    url := fmt.Sprintf("%s/%s", pfx, b)

    if len(v) > 0 {
        buf := bytes.NewBufferString(url)
@@ -116,8 +116,7 @@ func newJSONSignFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (http.Hand
    }
    h.pubKeyBlobRefServeSuffix = "camli/" + h.pubKeyBlobRef.String()
    h.pubKeyHandler = &gethandler.Handler{
        Fetcher: ms,
        AllowGlobalAccess: true, // just public keys
        Fetcher: ms,
    }

    return h, nil
@@ -0,0 +1,185 @@
/*
Copyright 2013 The Camlistore Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package server

import (
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "net/http"
    "strings"
    "time"

    "camlistore.org/pkg/auth"
    "camlistore.org/pkg/blobref"
    "camlistore.org/pkg/blobserver"
    "camlistore.org/pkg/blobserver/gethandler"
    "camlistore.org/pkg/httputil"
    "camlistore.org/pkg/jsonconfig"
    "camlistore.org/pkg/schema"
)

const fetchFailureDelay = 200 * time.Millisecond

// ShareHandler handles the requests for "share" (and shared) blobs.
type shareHandler struct {
    blobRoot string

    fetcher blobref.StreamingFetcher
}

func init() {
    blobserver.RegisterHandlerConstructor("share", newShareFromConfig)
}

func newShareFromConfig(ld blobserver.Loader, conf jsonconfig.Obj) (h http.Handler, err error) {
    blobRoot := conf.RequiredString("blobRoot")
    if blobRoot == "" {
        return nil, errors.New("No blobRoot defined for share handler")
    }

    share := &shareHandler{
        blobRoot: blobRoot,
    }
    bs, err := ld.GetStorage(share.blobRoot)
    if err != nil {
        return nil, fmt.Errorf("Share handler's blobRoot of %q error: %v", share.blobRoot, err)
    }
    fetcher, ok := bs.(blobref.StreamingFetcher)
    if !ok {
        return nil, errors.New("Share handler's storage not a StreamingFetcher.")
    }
    share.fetcher = fetcher
    return share, nil
}

// Unauthenticated user. Be paranoid.
func handleGetViaSharing(conn http.ResponseWriter, req *http.Request,
    blobRef *blobref.BlobRef, fetcher blobref.StreamingFetcher) {
    if req.Method != "GET" && req.Method != "HEAD" {
        httputil.BadRequestError(conn, "Invalid method")
        return
    }
    if w, ok := fetcher.(blobserver.ContextWrapper); ok {
        fetcher = w.WrapContext(req)
    }

    viaPathOkay := false
    startTime := time.Now()
    defer func() {
        if !viaPathOkay {
            // Insert a delay, to hide timing attacks probing
            // for the existence of blobs.
            sleep := fetchFailureDelay - (time.Now().Sub(startTime))
            time.Sleep(sleep)
        }
    }()
    viaBlobs := make([]*blobref.BlobRef, 0)
    if via := req.FormValue("via"); via != "" {
        for _, vs := range strings.Split(via, ",") {
            if br := blobref.Parse(vs); br == nil {
                httputil.BadRequestError(conn, "Malformed blobref in via param")
                return
            } else {
                viaBlobs = append(viaBlobs, br)
            }
        }
    }

    fetchChain := make([]*blobref.BlobRef, 0)
    fetchChain = append(fetchChain, viaBlobs...)
    fetchChain = append(fetchChain, blobRef)
    for i, br := range fetchChain {
        switch i {
        case 0:
            file, size, err := fetcher.FetchStreaming(br)
            if err != nil {
                log.Printf("Fetch chain 0 of %s failed: %v", br.String(), err)
                auth.SendUnauthorized(conn, req)
                return
            }
            defer file.Close()
            if size > schema.MaxSchemaBlobSize {
                log.Printf("Fetch chain 0 of %s too large", br.String())
                auth.SendUnauthorized(conn, req)
                return
            }
            jd := json.NewDecoder(file)
            m := make(map[string]interface{})
            if err := jd.Decode(&m); err != nil {
                log.Printf("Fetch chain 0 of %s wasn't JSON: %v", br.String(), err)
                auth.SendUnauthorized(conn, req)
                return
            }
            // TODO(mpl): make and use a struct type with json tags instead of map[string]interface{}.
            if m["camliType"].(string) != "share" {
                log.Printf("Fetch chain 0 of %s wasn't a share", br.String())
                auth.SendUnauthorized(conn, req)
                return
            }
            if len(fetchChain) > 1 && fetchChain[1].String() != m["target"].(string) {
                log.Printf("Fetch chain 0->1 (%s -> %q) unauthorized, expected hop to %q",
                    br.String(), fetchChain[1].String(), m["target"])
                auth.SendUnauthorized(conn, req)
                return
            }
        case len(fetchChain) - 1:
            // Last one is fine (as long as its path up to here has been proven, and it's
            // not the first thing in the chain)
            continue
        default:
            file, _, err := fetcher.FetchStreaming(br)
            if err != nil {
                log.Printf("Fetch chain %d of %s failed: %v", i, br.String(), err)
                auth.SendUnauthorized(conn, req)
                return
            }
            defer file.Close()
            lr := io.LimitReader(file, schema.MaxSchemaBlobSize)
            slurpBytes, err := ioutil.ReadAll(lr)
            if err != nil {
                log.Printf("Fetch chain %d of %s failed in slurp: %v", i, br.String(), err)
                auth.SendUnauthorized(conn, req)
                return
            }
            saught := fetchChain[i+1].String()
            if bytes.IndexAny(slurpBytes, saught) == -1 {
                log.Printf("Fetch chain %d of %s failed; no reference to %s",
                    i, br.String(), saught)
                auth.SendUnauthorized(conn, req)
                return
            }
        }
    }

    viaPathOkay = true

    gethandler.ServeBlobRef(conn, req, blobRef, fetcher)
}

func (h *shareHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
    blobRef := blobref.Parse(req.Header.Get("X-PrefixHandler-PathSuffix"))
    if blobRef == nil {
        http.Error(rw, "Malformed share URL.", 400)
        return
    }
    handleGetViaSharing(rw, req, blobRef, h.fetcher)
}
@@ -31,11 +31,12 @@ import (
// various parameters derived from the high-level user config
// and needed to set up the low-level config.
type configPrefixesParams struct {
    secretRing string
    keyId string
    indexerPath string
    blobPath string
    searchOwner *blobref.BlobRef
    secretRing string
    keyId string
    indexerPath string
    blobPath string
    searchOwner *blobref.BlobRef
    shareHandler bool
}

var tempDir = os.TempDir

@@ -257,6 +258,15 @@ func genLowLevelPrefixes(params *configPrefixesParams) (m jsonconfig.Obj) {
        "handler": "setup",
    }

    if params.shareHandler {
        m["/share/"] = map[string]interface{}{
            "handler": "share",
            "handlerArgs": map[string]interface{}{
                "blobRoot": "/bs/",
            },
        }
    }

    m["/sighelper/"] = map[string]interface{}{
        "handler": "jsonsign",
        "handlerArgs": map[string]interface{}{

@@ -335,8 +345,9 @@ func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
    tlsKey = conf.OptionalString("HTTPSKeyFile", "")

    // Blob storage options
    blobPath = conf.OptionalString("blobPath", "")
    s3 = conf.OptionalString("s3", "") // "access_key_id:secret_access_key:bucket"
    blobPath = conf.OptionalString("blobPath", "")
    s3 = conf.OptionalString("s3", "") // "access_key_id:secret_access_key:bucket"
    shareHandler = conf.OptionalBool("shareHandler", true) // enable the share handler

    // Index options
    runIndex = conf.OptionalBool("runIndex", true) // if false: no search, no UI, etc.

@@ -424,11 +435,12 @@ func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
    }

    prefixesParams := &configPrefixesParams{
        secretRing: secretRing,
        keyId: keyId,
        indexerPath: indexerPath,
        blobPath: blobPath,
        searchOwner: blobref.SHA1FromString(armoredPublicKey),
        secretRing: secretRing,
        keyId: keyId,
        indexerPath: indexerPath,
        blobPath: blobPath,
        searchOwner: blobref.SHA1FromString(armoredPublicKey),
        shareHandler: shareHandler,
    }

    prefixes := genLowLevelPrefixes(prefixesParams)
@@ -108,29 +108,34 @@ func (s *storageAndConfig) GetStorage() blobserver.Storage {
    return s.Storage
}

func handleCamliUsingStorage(conn http.ResponseWriter, req *http.Request, action string, storage blobserver.StorageConfiger) {
func camliHandlerUsingStorage(req *http.Request, action string, storage blobserver.StorageConfiger) (func(http.ResponseWriter, *http.Request), auth.Operation) {
    handler := unsupportedHandler
    op := auth.OpAll
    switch req.Method {
    case "GET":
        switch action {
        case "enumerate-blobs":
            handler = auth.RequireAuth(handlers.CreateEnumerateHandler(storage), auth.OpGet)
            handler = handlers.CreateEnumerateHandler(storage)
            op = auth.OpGet
        case "stat":
            handler = auth.RequireAuth(handlers.CreateStatHandler(storage), auth.OpAll)
            handler = handlers.CreateStatHandler(storage)
        default:
            handler = gethandler.CreateGetHandler(storage)
            op = auth.OpGet
        }
    case "POST":
        switch action {
        case "stat":
            handler = auth.RequireAuth(handlers.CreateStatHandler(storage), auth.OpStat)
            handler = handlers.CreateStatHandler(storage)
            op = auth.OpStat
        case "upload":
            handler = auth.RequireAuth(handlers.CreateUploadHandler(storage), auth.OpUpload)
            handler = handlers.CreateUploadHandler(storage)
            op = auth.OpUpload
        case "remove":
            handler = auth.RequireAuth(handlers.CreateRemoveHandler(storage), auth.OpAll)
            handler = handlers.CreateRemoveHandler(storage)
        }
    }
    handler(conn, req)
    return handler, op
}

// where prefix is like "/" or "/s3/" for e.g. "/camli/" or "/s3/camli/*"

@@ -162,7 +167,8 @@ func makeCamliHandler(prefix, baseURL string, storage blobserver.Storage, hf blo
            unsupportedHandler(conn, req)
            return
        }
        handleCamliUsingStorage(conn, req, action, storageConfig)
        handler := auth.RequireAuth(camliHandlerUsingStorage(req, action, storageConfig))
        handler(conn, req)
    })
}
@@ -23,7 +23,14 @@

    "/setup/": {
        "handler": "setup"
    },
    },

    "/share/": {
        "handler": "share",
        "handlerArgs": {
            "blobRoot": "/bs/"
        }
    },

    "/sync/": {
        "handler": "sync",

@@ -8,5 +8,6 @@
    "memIndex": true,
    "s3": "key:secret:bucket",
    "replicateTo": [],
    "publish": {}
    "publish": {},
    "shareHandler": true
}

@@ -13,7 +13,14 @@

    "/setup/": {
        "handler": "setup"
    },
    },

    "/share/": {
        "handler": "share",
        "handlerArgs": {
            "blobRoot": "/bs/"
        }
    },

    "/sighelper/": {
        "handler": "jsonsign",

@@ -5,5 +5,6 @@
    "blobPath": "/tmp/blobs",
    "identity": "26F5ABDA",
    "identitySecretRing": "/path/to/secring",
    "runIndex": false
    "runIndex": false,
    "shareHandler": true
}

@@ -24,7 +24,14 @@

    "/setup/": {
        "handler": "setup"
    },
    },

    "/share/": {
        "handler": "share",
        "handlerArgs": {
            "blobRoot": "/bs/"
        }
    },

    "/sync/": {
        "handler": "sync",

@@ -5,5 +5,6 @@
    "blobPath": "/tmp/blobs",
    "memIndex": true,
    "identity": "26F5ABDA",
    "identitySecretRing": "/path/to/secring"
    "identitySecretRing": "/path/to/secring",
    "shareHandler": true
}

@@ -23,7 +23,14 @@

    "/setup/": {
        "handler": "setup"
    },
    },

    "/share/": {
        "handler": "share",
        "handlerArgs": {
            "blobRoot": "/bs/"
        }
    },

    "/sync/": {
        "handler": "sync",

@@ -7,5 +7,6 @@
    "memIndex": true,
    "s3": "key:secret:bucket",
    "replicateTo": [],
    "publish": {}
    "publish": {},
    "shareHandler": true
}

@@ -23,7 +23,14 @@

    "/setup/": {
        "handler": "setup"
    },
    },

    "/share/": {
        "handler": "share",
        "handlerArgs": {
            "blobRoot": "/bs/"
        }
    },

    "/sync/": {
        "handler": "sync",

@@ -5,5 +5,6 @@
    "identity": "26F5ABDA",
    "identitySecretRing": "/path/to/secring",
    "blobPath": "/tmp/blobs",
    "sqlite": "/tmp/camli.db"
    "sqlite": "/tmp/camli.db",
    "shareHandler": true
}

@@ -25,7 +25,14 @@

    "/setup/": {
        "handler": "setup"
    },
    },

    "/share/": {
        "handler": "share",
        "handlerArgs": {
            "blobRoot": "/bs/"
        }
    },

    "/sync/": {
        "handler": "sync",

@@ -10,5 +10,6 @@
    "memIndex": true,
    "s3": "",
    "replicateTo": [],
    "publish": {}
    "publish": {},
    "shareHandler": true
}

@@ -36,7 +36,14 @@

    "/setup/": {
        "handler": "setup"
    },
    },

    "/share/": {
        "handler": "share",
        "handlerArgs": {
            "blobRoot": "/bs/"
        }
    },

    "/sync/": {
        "handler": "sync",

@@ -14,5 +14,6 @@
            "style": "blog-purple.css"
        }
    },
    "replicateTo": []
    "replicateTo": [],
    "shareHandler": true
}

@@ -38,7 +38,14 @@

    "/setup/": {
        "handler": "setup"
    },
    },

    "/share/": {
        "handler": "share",
        "handlerArgs": {
            "blobRoot": "/bs/"
        }
    },

    "/sync/": {
        "handler": "sync",

@@ -14,5 +14,6 @@
            "style": "pics.css"
        }
    },
    "replicateTo": []
    "replicateTo": [],
    "shareHandler": true
}