/*
Copyright 2011 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package client implements a Camlistore client.
|
2011-01-02 22:36:03 +00:00
|
|
|
package client
|
|
|
|
|
|
|
|
import (
|
2012-12-23 06:48:21 +00:00
|
|
|
"bytes"
|
2013-04-08 13:50:50 +00:00
|
|
|
"crypto/tls"
|
2012-11-08 04:23:45 +00:00
|
|
|
"encoding/json"
|
2012-12-23 02:42:35 +00:00
|
|
|
"errors"
|
2012-11-08 04:23:45 +00:00
|
|
|
"fmt"
|
2012-12-23 06:48:21 +00:00
|
|
|
"io"
|
2012-04-22 23:19:04 +00:00
|
|
|
"io/ioutil"
|
2011-01-18 18:29:38 +00:00
|
|
|
"log"
|
2013-04-08 13:50:50 +00:00
|
|
|
"net"
|
Update from r60 to [almost] Go 1.
A lot is still broken, but most stuff at least compiles now.
The directory tree has been rearranged now too. Go libraries are now
under "pkg". Fully qualified, they are e.g. "camlistore.org/pkg/jsonsign".
The go tool cannot yet fetch from arbitrary domains, but discussion is
happening now on which mechanism to use to allow that.
For now, put the camlistore root under $GOPATH/src. Typically $GOPATH
is $HOME, so Camlistore should be at $HOME/src/camlistore.org.
Then you can:
$ go build ./server/camlistored
... etc
The build.pl script is currently disabled. It'll be resurrected at
some point, but with a very different role (helping create a fake
GOPATH and running the go build command, if things are installed at
the wrong place, and/or running fileembed generators).
Many things are certainly broken.
Many things are disabled. (MySQL, all indexing, etc).
Many things need to be moved into
camlistore.org/third_party/{code.google.com,github.com} and updated
from their r60 to Go 1 versions, where applicable.
The GoMySQL stuff should be updated to use database/sql and the ziutek
library implementing database/sql/driver.
Help wanted.
Change-Id: If71217dc5c8f0e70dbe46e9504ca5131c6eeacde
2012-02-19 05:53:06 +00:00
|
|
|
"net/http"
|
2012-11-08 04:23:45 +00:00
|
|
|
"net/url"
|
2011-01-18 18:29:38 +00:00
|
|
|
"os"
|
2013-08-19 15:17:35 +00:00
|
|
|
"path/filepath"
|
2013-01-02 20:55:12 +00:00
|
|
|
"regexp"
|
2012-11-08 04:23:45 +00:00
|
|
|
"strings"
|
2011-01-02 22:36:03 +00:00
|
|
|
"sync"
|
2013-07-11 06:46:23 +00:00
|
|
|
"time"
|
2011-12-02 10:35:28 +00:00
|
|
|
|
Update from r60 to [almost] Go 1.
A lot is still broken, but most stuff at least compiles now.
The directory tree has been rearranged now too. Go libraries are now
under "pkg". Fully qualified, they are e.g. "camlistore.org/pkg/jsonsign".
The go tool cannot yet fetch from arbitrary domains, but discussion is
happening now on which mechanism to use to allow that.
For now, put the camlistore root under $GOPATH/src. Typically $GOPATH
is $HOME, so Camlistore should be at $HOME/src/camlistore.org.
Then you can:
$ go build ./server/camlistored
... etc
The build.pl script is currently disabled. It'll be resurrected at
some point, but with a very different role (helping create a fake
GOPATH and running the go build command, if things are installed at
the wrong place, and/or running fileembed generators).
Many things are certainly broken.
Many things are disabled. (MySQL, all indexing, etc).
Many things need to be moved into
camlistore.org/third_party/{code.google.com,github.com} and updated
from their r60 to Go 1 versions, where applicable.
The GoMySQL stuff should be updated to use database/sql and the ziutek
library implementing database/sql/driver.
Help wanted.
Change-Id: If71217dc5c8f0e70dbe46e9504ca5131c6eeacde
2012-02-19 05:53:06 +00:00
|
|
|
"camlistore.org/pkg/auth"
|
2013-08-04 02:54:30 +00:00
|
|
|
"camlistore.org/pkg/blob"
|
2013-06-22 23:35:48 +00:00
|
|
|
"camlistore.org/pkg/httputil"
|
2013-07-11 06:46:23 +00:00
|
|
|
"camlistore.org/pkg/jsonsign"
|
2013-04-08 13:50:50 +00:00
|
|
|
"camlistore.org/pkg/misc"
|
2013-01-02 20:55:12 +00:00
|
|
|
"camlistore.org/pkg/schema"
|
2013-02-08 05:55:17 +00:00
|
|
|
"camlistore.org/pkg/search"
|
2011-01-02 22:36:03 +00:00
|
|
|
)
|
|
|
|
|
2012-12-23 06:48:21 +00:00
|
|
|
// A Client provides access to a Camlistore server.
type Client struct {
	// server is the input from user, pre-discovery.
	// For example "http://foo.com" or "foo.com:1234".
	// It is the responsibility of initPrefix to parse
	// server and set prefix, including doing discovery
	// to figure out what the proper server-declared
	// prefix is.
	server string

	prefixOnce    sync.Once // guards init of following 3 fields
	prefixErr     error
	prefixv       string // URL prefix before "/camli/"
	isSharePrefix bool   // URL is a request for a share blob

	discoOnce      sync.Once // guards discovery, which fills the fields below
	discoErr       error
	searchRoot     string      // Handler prefix, or "" if none
	downloadHelper string      // or "" if none
	storageGen     string      // storage generation, or "" if not reported
	syncHandlers   []*SyncInfo // "from" and "to" url prefix for each syncHandler

	entityFetcherOnce sync.Once // guards lazy init of entityFetcher
	entityFetcher     jsonsign.EntityFetcher

	authMode auth.AuthMode // how requests are authenticated

	httpClient *http.Client
	haveCache  HaveCache // never nil; defaults to noHaveCache

	initTrustedCertsOnce sync.Once
	// We define a certificate fingerprint as the 10 digits lowercase prefix
	// of the SHA1 of the complete certificate (in ASN.1 DER encoding).
	// It is the same as what 'openssl x509 -fingerprint' shows and what
	// web browsers commonly use (except truncated to 10 digits).
	// trustedCerts contains the fingerprints of the self-signed
	// certificates we trust.
	// If not empty, (and if using TLS) the full x509 verification is
	// disabled, and we instead check the server's certificate against
	// that list.
	// The camlistore server prints the fingerprint to add to the config
	// when starting.
	trustedCerts []string
	// if set, we also skip the check against trustedCerts
	InsecureTLS bool

	initIgnoredFilesOnce sync.Once
	// list of files that camput should ignore when using -filenodes.
	// Defaults to empty, but camput init creates a config with a non
	// empty list.
	ignoredFiles []string

	pendStatMu sync.Mutex             // guards pendStat
	pendStat   map[blob.Ref][]statReq // blobref -> reqs; for next batch(es)

	initSelfPubKeyDirOnce sync.Once
	// dir containing the public key(s) blob(s).
	// Defaults to osutil.KeyblobsDir().
	selfPubKeyDir string

	statsMutex sync.Mutex // guards stats
	stats      Stats

	// via maps the access path from a share root to a desired target.
	// It is non-nil when in "sharing" mode, where the Client is fetching
	// a share.
	via map[string]string // target => via (target is referenced from via)

	log     *log.Logger // not nil
	reqGate chan bool   // semaphore limiting concurrent HTTP requests
}
|
|
|
|
|
2012-12-28 17:24:26 +00:00
|
|
|
// maxParallelHTTP is the maximum number of concurrent HTTP requests
// a Client will make; doReqGated blocks once this many are in flight.
const maxParallelHTTP = 5
|
|
|
|
|
2012-12-23 06:48:21 +00:00
|
|
|
// New returns a new Camlistore Client.
|
|
|
|
// The provided server is either "host:port" (assumed http, not https) or a
|
|
|
|
// URL prefix, with or without a path.
|
|
|
|
// Errors are not returned until subsequent operations.
|
2011-11-16 10:41:38 +00:00
|
|
|
func New(server string) *Client {
|
2011-03-02 02:02:01 +00:00
|
|
|
return &Client{
|
2011-05-10 19:56:40 +00:00
|
|
|
server: server,
|
2011-05-10 13:25:18 +00:00
|
|
|
httpClient: http.DefaultClient,
|
2012-12-28 17:24:26 +00:00
|
|
|
reqGate: make(chan bool, maxParallelHTTP),
|
2012-12-31 21:45:13 +00:00
|
|
|
haveCache: noHaveCache{},
|
2011-03-02 02:02:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-12-31 21:45:13 +00:00
|
|
|
func NewOrFail() *Client {
|
2013-01-26 00:23:31 +00:00
|
|
|
c := New(serverOrDie())
|
2012-12-31 21:45:13 +00:00
|
|
|
c.log = log.New(os.Stderr, "", log.Ldate|log.Ltime)
|
|
|
|
err := c.SetupAuth()
|
|
|
|
if err != nil {
|
|
|
|
log.Fatal(err)
|
|
|
|
}
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
2013-06-22 23:35:48 +00:00
|
|
|
// TransportConfig contains options for SetupTransport.
type TransportConfig struct {
	// Proxy optionally specifies the Proxy for the transport. Useful with
	// camput for debugging even localhost requests.
	Proxy   func(*http.Request) (*url.URL, error)
	Verbose bool // Verbose enables verbose logging of HTTP requests.
}
|
|
|
|
|
|
|
|
// TransportForConfig returns a transport for the client, setting the correct
// Proxy, Dial, and TLSClientConfig if needed. It does not mutate c.
// It is the caller's responsibility to then use that transport to set
// the client's httpClient with SetHTTPClient.
func (c *Client) TransportForConfig(tc *TransportConfig) http.RoundTripper {
	if c == nil {
		return nil
	}
	tlsConfig, err := c.TLSConfig()
	if err != nil {
		log.Fatalf("Error while configuring TLS for client: %v", err)
	}
	var transport http.RoundTripper
	// Default to the environment proxy unless the config overrides it.
	proxy := http.ProxyFromEnvironment
	if tc != nil && tc.Proxy != nil {
		proxy = tc.Proxy
	}
	transport = &http.Transport{
		Dial:            c.DialFunc(),
		TLSClientConfig: tlsConfig,
		Proxy:           proxy,
	}
	// Wrap to collect per-request HTTP stats.
	httpStats := &httputil.StatsTransport{
		Transport: transport,
	}
	if tc != nil {
		httpStats.VerboseLog = tc.Verbose
	}
	transport = httpStats
	if onAndroid() {
		// On Android, wrap once more to report stats to the app.
		transport = &AndroidStatsTransport{transport}
	}
	return transport
}
|
|
|
|
|
2013-07-15 18:56:44 +00:00
|
|
|
// A ClientOption optionally modifies a Client at construction time.
type ClientOption interface {
	modifyClient(*Client)
}
|
|
|
|
|
|
|
|
// OptionInsecure returns a ClientOption setting the client's
// InsecureTLS field to v (skipping TLS certificate verification).
func OptionInsecure(v bool) ClientOption {
	return optionInsecure(v)
}

// optionInsecure is the ClientOption implementation behind OptionInsecure.
type optionInsecure bool

func (o optionInsecure) modifyClient(c *Client) {
	c.InsecureTLS = bool(o)
}
|
|
|
|
|
2013-08-04 02:54:30 +00:00
|
|
|
// shareURLRx matches a share URL: a server prefix followed by "/" and
// a blobref (the share root blob).
var shareURLRx = regexp.MustCompile(`^(.+)/(` + blob.Pattern + ")$")
|
2013-01-02 20:55:12 +00:00
|
|
|
|
2013-07-15 18:56:44 +00:00
|
|
|
// NewFromShareRoot uses shareBlobURL to set up and return a client that
|
|
|
|
// will be used to fetch shared blobs.
|
2013-08-04 02:54:30 +00:00
|
|
|
func NewFromShareRoot(shareBlobURL string, opts ...ClientOption) (c *Client, target blob.Ref, err error) {
|
2013-01-02 20:55:12 +00:00
|
|
|
var root string
|
2013-02-28 23:30:16 +00:00
|
|
|
m := shareURLRx.FindStringSubmatch(shareBlobURL)
|
|
|
|
if m == nil {
|
2013-08-04 02:54:30 +00:00
|
|
|
return nil, blob.Ref{}, fmt.Errorf("Unkown share URL base")
|
2013-02-28 23:30:16 +00:00
|
|
|
}
|
|
|
|
c = New(m[1])
|
|
|
|
c.discoOnce.Do(func() { /* nothing */
|
|
|
|
})
|
|
|
|
c.prefixOnce.Do(func() { /* nothing */
|
|
|
|
})
|
|
|
|
c.prefixv = m[1]
|
|
|
|
c.isSharePrefix = true
|
|
|
|
c.authMode = auth.None{}
|
|
|
|
c.via = make(map[string]string)
|
|
|
|
root = m[2]
|
|
|
|
|
2013-07-15 18:56:44 +00:00
|
|
|
for _, v := range opts {
|
|
|
|
v.modifyClient(c)
|
|
|
|
}
|
2013-06-22 23:35:48 +00:00
|
|
|
c.SetHTTPClient(&http.Client{Transport: c.TransportForConfig(nil)})
|
|
|
|
|
|
|
|
req := c.newRequest("GET", shareBlobURL, nil)
|
|
|
|
res, err := c.doReqGated(req)
|
2013-01-02 20:55:12 +00:00
|
|
|
if err != nil {
|
2013-08-04 02:54:30 +00:00
|
|
|
return nil, blob.Ref{}, fmt.Errorf("Error fetching %s: %v", shareBlobURL, err)
|
2013-01-02 20:55:12 +00:00
|
|
|
}
|
|
|
|
defer res.Body.Close()
|
2013-08-04 02:54:30 +00:00
|
|
|
b, err := schema.BlobFromReader(blob.ParseOrZero(root), res.Body)
|
2013-01-02 20:55:12 +00:00
|
|
|
if err != nil {
|
2013-08-04 02:54:30 +00:00
|
|
|
return nil, blob.Ref{}, fmt.Errorf("Error parsing JSON from %s: %v", shareBlobURL, err)
|
2013-01-02 20:55:12 +00:00
|
|
|
}
|
2013-08-04 02:54:30 +00:00
|
|
|
if b.ShareAuthType() != schema.ShareHaveRef {
|
|
|
|
return nil, blob.Ref{}, fmt.Errorf("Unknown share authType of %q", b.ShareAuthType())
|
2013-01-02 20:55:12 +00:00
|
|
|
}
|
2013-08-04 02:54:30 +00:00
|
|
|
target = b.ShareTarget()
|
|
|
|
if !target.Valid() {
|
|
|
|
return nil, blob.Ref{}, fmt.Errorf("No target.")
|
2013-01-02 20:55:12 +00:00
|
|
|
}
|
2013-01-22 17:52:01 +00:00
|
|
|
c.via[target.String()] = root
|
|
|
|
return c, target, nil
|
2013-01-02 20:55:12 +00:00
|
|
|
}
|
|
|
|
|
2012-11-08 03:06:10 +00:00
|
|
|
// SetHTTPClient sets the Camlistore client's HTTP client.
|
|
|
|
// If nil, the default HTTP client is used.
|
2012-11-08 03:03:46 +00:00
|
|
|
func (c *Client) SetHTTPClient(client *http.Client) {
|
2012-11-08 03:06:10 +00:00
|
|
|
if client == nil {
|
|
|
|
client = http.DefaultClient
|
|
|
|
}
|
2011-05-10 13:25:18 +00:00
|
|
|
c.httpClient = client
|
|
|
|
}
|
|
|
|
|
2012-12-31 21:45:13 +00:00
|
|
|
// A HaveCache caches whether a remote blobserver has a blob.
type HaveCache interface {
	// StatBlobCache returns the cached size of br and whether
	// its existence on the server is known.
	StatBlobCache(br blob.Ref) (size int64, ok bool)
	// NoteBlobExists records that the server has br at the given size.
	NoteBlobExists(br blob.Ref, size int64)
}
|
|
|
|
|
|
|
|
// noHaveCache is the default HaveCache: it caches nothing and
// reports every blob as unknown.
type noHaveCache struct{}

func (noHaveCache) StatBlobCache(blob.Ref) (int64, bool) { return 0, false }

func (noHaveCache) NoteBlobExists(blob.Ref, int64) {}
|
2012-12-31 21:45:13 +00:00
|
|
|
|
|
|
|
func (c *Client) SetHaveCache(cache HaveCache) {
|
|
|
|
if cache == nil {
|
|
|
|
cache = noHaveCache{}
|
2011-11-16 10:41:38 +00:00
|
|
|
}
|
2012-12-31 21:45:13 +00:00
|
|
|
c.haveCache = cache
|
2011-01-18 18:29:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Client) SetLogger(logger *log.Logger) {
|
|
|
|
if logger == nil {
|
2012-04-22 23:19:04 +00:00
|
|
|
c.log = log.New(ioutil.Discard, "", 0)
|
2011-01-18 18:29:38 +00:00
|
|
|
} else {
|
|
|
|
c.log = logger
|
|
|
|
}
|
2011-01-02 22:36:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Stats returns a copy of the client's operation statistics,
// taken while holding the stats mutex.
func (c *Client) Stats() Stats {
	c.statsMutex.Lock()
	defer c.statsMutex.Unlock()
	return c.stats // copy
}
|
2011-01-15 01:22:45 +00:00
|
|
|
|
2012-12-23 02:42:35 +00:00
|
|
|
// ErrNoSearchRoot is returned by SearchRoot if the server doesn't support search.
var ErrNoSearchRoot = errors.New("client: server doesn't support search")

// ErrNoStorageGeneration is returned by StorageGeneration if the
// server doesn't report a storage generation value.
var ErrNoStorageGeneration = errors.New("client: server doesn't report a storage generation")

// ErrNoSync is returned by SyncHandlers if the server does not advertise syncs.
var ErrNoSync = errors.New("client: server has no sync handlers")
|
|
|
|
|
|
|
|
// BlobRoot returns the server's blobroot URL prefix.
|
|
|
|
// If the client was constructed with an explicit path,
|
|
|
|
// that path is used. Otherwise the server's
|
|
|
|
// default advertised blobRoot is used.
|
|
|
|
func (c *Client) BlobRoot() (string, error) {
|
|
|
|
prefix, err := c.prefix()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
return prefix + "/", nil
|
|
|
|
}
|
|
|
|
|
2012-12-31 19:36:58 +00:00
|
|
|
// SearchRoot returns the server's search handler.
|
|
|
|
// If the server isn't running an index and search handler, the error
|
|
|
|
// will be ErrNoSearchRoot.
|
2012-12-23 02:42:35 +00:00
|
|
|
func (c *Client) SearchRoot() (string, error) {
|
2012-12-23 06:48:21 +00:00
|
|
|
c.condDiscovery()
|
|
|
|
if c.discoErr != nil {
|
|
|
|
return "", c.discoErr
|
|
|
|
}
|
|
|
|
if c.searchRoot == "" {
|
|
|
|
return "", ErrNoSearchRoot
|
|
|
|
}
|
|
|
|
return c.searchRoot, nil
|
|
|
|
}
|
|
|
|
|
2012-12-31 19:36:58 +00:00
|
|
|
// StorageGeneration returns the server's unique ID for its storage
|
|
|
|
// generation, reset whenever storage is reset, moved, or partially
|
|
|
|
// lost.
|
|
|
|
//
|
|
|
|
// This is a value that can be used in client cache keys to add
|
|
|
|
// certainty that they're talking to the same instance as previously.
|
|
|
|
//
|
|
|
|
// If the server doesn't return such a value, the error will be
|
|
|
|
// ErrNoStorageGeneration.
|
|
|
|
func (c *Client) StorageGeneration() (string, error) {
|
|
|
|
c.condDiscovery()
|
|
|
|
if c.discoErr != nil {
|
|
|
|
return "", c.discoErr
|
|
|
|
}
|
|
|
|
if c.storageGen == "" {
|
|
|
|
return "", ErrNoStorageGeneration
|
|
|
|
}
|
|
|
|
return c.storageGen, nil
|
|
|
|
}
|
|
|
|
|
2013-01-15 13:53:25 +00:00
|
|
|
// SyncInfo holds the data that were acquired with a discovery
// and that are relevant to a syncHandler.
type SyncInfo struct {
	From string // URL prefix of the sync source
	To   string // URL prefix of the sync destination
}
|
|
|
|
|
|
|
|
// SyncHandlers returns the server's sync handlers "from" and
|
|
|
|
// "to" prefix URLs.
|
|
|
|
// If the server isn't running any sync handler, the error
|
|
|
|
// will be ErrNoSync.
|
|
|
|
func (c *Client) SyncHandlers() ([]*SyncInfo, error) {
|
|
|
|
c.condDiscovery()
|
|
|
|
if c.discoErr != nil {
|
|
|
|
return nil, c.discoErr
|
|
|
|
}
|
|
|
|
if c.syncHandlers == nil {
|
|
|
|
return nil, ErrNoSync
|
|
|
|
}
|
|
|
|
return c.syncHandlers, nil
|
|
|
|
}
|
|
|
|
|
2013-02-08 05:55:17 +00:00
|
|
|
// Compile-time check that Client implements search.IGetRecentPermanodes.
var _ search.IGetRecentPermanodes = (*Client)(nil)
|
|
|
|
|
2013-07-10 09:58:55 +00:00
|
|
|
// GetRecentPermanodes implements search.IGetRecentPermanodes against a remote server over HTTP.
func (c *Client) GetRecentPermanodes(req *search.RecentRequest) (*search.RecentResponse, error) {
	sr, err := c.SearchRoot()
	if err != nil {
		return nil, err
	}
	url := sr + req.URLSuffix()
	hreq := c.newRequest("GET", url)
	hres, err := c.doReqGated(hreq)
	if err != nil {
		return nil, err
	}
	defer hres.Body.Close()
	res := new(search.RecentResponse)
	if err := json.NewDecoder(hres.Body).Decode(res); err != nil {
		return nil, err
	}
	// The server can report an application-level error inside the JSON body.
	if err := res.Err(); err != nil {
		return nil, err
	}
	return res, nil
}
|
|
|
|
|
2013-07-10 11:10:18 +00:00
|
|
|
// GetPermanodesWithAttr queries the server's search handler for
// permanodes matching req over HTTP.
func (c *Client) GetPermanodesWithAttr(req *search.WithAttrRequest) (*search.WithAttrResponse, error) {
	sr, err := c.SearchRoot()
	if err != nil {
		return nil, err
	}
	url := sr + req.URLSuffix()
	hreq := c.newRequest("GET", url)
	hres, err := c.doReqGated(hreq)
	if err != nil {
		return nil, err
	}
	defer hres.Body.Close()
	res := new(search.WithAttrResponse)
	if err := json.NewDecoder(hres.Body).Decode(res); err != nil {
		return nil, err
	}
	// The server can report an application-level error inside the JSON body.
	if err := res.Err(); err != nil {
		return nil, err
	}
	return res, nil
}
|
|
|
|
|
2013-07-11 00:35:06 +00:00
|
|
|
// Describe queries the server's search handler to describe the
// blobs named in req.
func (c *Client) Describe(req *search.DescribeRequest) (*search.DescribeResponse, error) {
	sr, err := c.SearchRoot()
	if err != nil {
		return nil, err
	}
	url := sr + req.URLSuffix()
	hreq := c.newRequest("GET", url)
	hres, err := c.doReqGated(hreq)
	if err != nil {
		return nil, err
	}
	defer hres.Body.Close()
	res := new(search.DescribeResponse)
	if err := json.NewDecoder(hres.Body).Decode(res); err != nil {
		return nil, err
	}
	return res, nil
}
|
|
|
|
|
2012-12-23 06:48:21 +00:00
|
|
|
// SearchExistingFileSchema does a search query looking for an
// existing file with entire contents of wholeRef, then does a HEAD
// request to verify the file still exists on the server. If so,
// it returns that file schema's blobref.
//
// May return (zero, nil) on ENOENT. A non-nil error is only returned
// if there were problems searching.
func (c *Client) SearchExistingFileSchema(wholeRef blob.Ref) (blob.Ref, error) {
	sr, err := c.SearchRoot()
	if err != nil {
		return blob.Ref{}, err
	}
	url := sr + "camli/search/files?wholedigest=" + wholeRef.String()
	req := c.newRequest("GET", url)
	res, err := c.doReqGated(req)
	if err != nil {
		return blob.Ref{}, err
	}
	defer res.Body.Close()
	// Tee the (size-limited) body into buf so the raw response can be
	// quoted in error messages below.
	var buf bytes.Buffer
	body := io.TeeReader(io.LimitReader(res.Body, 1<<20), &buf)
	// justWriter hides any ReadFrom method on the destination so io.Copy
	// actually reads through the TeeReader; see golang.org/issue/4589.
	type justWriter struct {
		io.Writer
	}
	if res.StatusCode != 200 {
		io.Copy(justWriter{ioutil.Discard}, body) // golang.org/issue/4589
		return blob.Ref{}, fmt.Errorf("client: got status code %d from URL %s; body %s", res.StatusCode, url, buf.String())
	}
	var ress struct {
		Files []blob.Ref `json:"files"`
	}
	if err := json.NewDecoder(body).Decode(&ress); err != nil {
		io.Copy(justWriter{ioutil.Discard}, body) // golang.org/issue/4589
		return blob.Ref{}, fmt.Errorf("client: error parsing JSON from URL %s: %v; body=%s", url, err, buf.String())
	}
	if len(ress.Files) == 0 {
		return blob.Ref{}, nil
	}
	// Verify each candidate still exists on the server and matches wholeRef.
	for _, f := range ress.Files {
		if c.FileHasContents(f, wholeRef) {
			return f, nil
		}
	}
	return blob.Ref{}, nil
}
|
|
|
|
|
|
|
|
// FileHasContents returns true iff f refers to a "file" or "bytes" schema blob,
// the server is configured with a "download helper", and the server responds
// that all chunks of 'f' are available and match the digest of wholeRef.
func (c *Client) FileHasContents(f, wholeRef blob.Ref) bool {
	c.condDiscovery()
	if c.discoErr != nil {
		return false
	}
	if c.downloadHelper == "" {
		return false
	}
	req := c.newRequest("HEAD", c.downloadHelper+f.String()+"/?verifycontents="+wholeRef.String())
	res, err := c.doReqGated(req)
	if err != nil {
		log.Printf("download helper HEAD error: %v", err)
		return false
	}
	defer res.Body.Close()
	// On a match, the server echoes the verified digest in this header.
	return res.Header.Get("X-Camli-Contents") == wholeRef.String()
}
|
|
|
|
|
2013-02-28 23:30:16 +00:00
|
|
|
// prefix returns the URL prefix before "/camli/", or before
// the blobref hash in case of a share URL.
// Examples: http://foo.com:3179/bs or http://foo.com:3179/share
func (c *Client) prefix() (string, error) {
	c.prefixOnce.Do(func() { c.initPrefix() })
	if c.prefixErr != nil {
		return "", c.prefixErr
	}
	// initPrefix may have triggered discovery; surface its error too.
	if c.discoErr != nil {
		return "", c.discoErr
	}
	return c.prefixv, nil
}
|
|
|
|
|
2013-02-28 23:30:16 +00:00
|
|
|
// blobPrefix returns the URL prefix before the blobref hash.
|
|
|
|
// Example: http://foo.com:3179/bs/camli or http://foo.com:3179/share
|
|
|
|
func (c *Client) blobPrefix() (string, error) {
|
|
|
|
pfx, err := c.prefix()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
if !c.isSharePrefix {
|
|
|
|
pfx += "/camli"
|
|
|
|
}
|
|
|
|
return pfx, nil
|
|
|
|
}
|
|
|
|
|
2012-12-23 06:48:21 +00:00
|
|
|
func (c *Client) discoRoot() string {
|
2012-11-08 04:23:45 +00:00
|
|
|
s := c.server
|
|
|
|
if !strings.HasPrefix(s, "http") {
|
|
|
|
s = "http://" + s
|
|
|
|
}
|
2012-12-23 06:48:21 +00:00
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
2013-01-15 13:53:25 +00:00
|
|
|
// initPrefix uses the user provided server URL to define the URL
// prefix to the blobserver root. If the server URL has a path
// component then it is directly used, otherwise the blobRoot
// from the discovery is used as the path.
func (c *Client) initPrefix() {
	c.isSharePrefix = false
	root := c.discoRoot()
	u, err := url.Parse(root)
	if err != nil {
		c.prefixErr = err
		return
	}
	if len(u.Path) > 1 {
		// Explicit path given by the user: use it as-is.
		c.prefixv = strings.TrimRight(root, "/")
		return
	}
	// No path component: ask the server (via discovery) for the blobRoot,
	// which sets c.prefixv as a side effect.
	c.condDiscovery()
}
|
|
|
|
|
|
|
|
func (c *Client) condDiscovery() {
|
|
|
|
c.discoOnce.Do(func() { c.doDiscovery() })
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Client) doDiscovery() {
|
|
|
|
root, err := url.Parse(c.discoRoot())
|
|
|
|
if err != nil {
|
|
|
|
c.discoErr = err
|
2012-11-08 04:23:45 +00:00
|
|
|
return
|
|
|
|
}
|
2012-12-23 06:48:21 +00:00
|
|
|
|
2012-11-08 04:23:45 +00:00
|
|
|
// If the path is just "" or "/", do discovery against
|
|
|
|
// the URL to see which path we should actually use.
|
2013-06-04 21:36:16 +00:00
|
|
|
req := c.newRequest("GET", c.discoRoot(), nil)
|
2012-11-08 04:23:45 +00:00
|
|
|
req.Header.Set("Accept", "text/x-camli-configuration")
|
2013-02-03 05:51:08 +00:00
|
|
|
res, err := c.doReqGated(req)
|
2012-11-08 04:23:45 +00:00
|
|
|
if err != nil {
|
2012-12-23 06:48:21 +00:00
|
|
|
c.discoErr = err
|
2012-11-08 04:23:45 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
defer res.Body.Close()
|
|
|
|
if res.StatusCode != 200 {
|
2012-12-23 06:48:21 +00:00
|
|
|
c.discoErr = fmt.Errorf("Got status %q from blobserver URL %q during configuration discovery", res.Status, c.discoRoot())
|
2012-11-08 04:23:45 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
// TODO(bradfitz): little weird in retrospect that we request
|
|
|
|
// text/x-camli-configuration and expect to get back
|
|
|
|
// text/javascript. Make them consistent.
|
|
|
|
if ct := res.Header.Get("Content-Type"); ct != "text/javascript" {
|
2012-12-23 06:48:21 +00:00
|
|
|
c.discoErr = fmt.Errorf("Blobserver returned unexpected type %q from discovery", ct)
|
2012-11-08 04:23:45 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
m := make(map[string]interface{})
|
|
|
|
if err := json.NewDecoder(res.Body).Decode(&m); err != nil {
|
2012-12-23 06:48:21 +00:00
|
|
|
c.discoErr = err
|
2012-11-08 04:23:45 +00:00
|
|
|
return
|
|
|
|
}
|
2012-12-23 06:48:21 +00:00
|
|
|
searchRoot, ok := m["searchRoot"].(string)
|
|
|
|
if ok {
|
|
|
|
u, err := root.Parse(searchRoot)
|
|
|
|
if err != nil {
|
|
|
|
c.discoErr = fmt.Errorf("client: invalid searchRoot %q; failed to resolve", searchRoot)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
c.searchRoot = u.String()
|
|
|
|
}
|
|
|
|
|
|
|
|
downloadHelper, ok := m["downloadHelper"].(string)
|
|
|
|
if ok {
|
|
|
|
u, err := root.Parse(downloadHelper)
|
|
|
|
if err != nil {
|
|
|
|
c.discoErr = fmt.Errorf("client: invalid downloadHelper %q; failed to resolve", downloadHelper)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
c.downloadHelper = u.String()
|
|
|
|
}
|
|
|
|
|
2012-12-31 19:36:58 +00:00
|
|
|
c.storageGen, _ = m["storageGeneration"].(string)
|
|
|
|
|
2012-11-08 04:23:45 +00:00
|
|
|
blobRoot, ok := m["blobRoot"].(string)
|
|
|
|
if !ok {
|
2012-12-23 06:48:21 +00:00
|
|
|
c.discoErr = fmt.Errorf("No blobRoot in config discovery response")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
u, err := root.Parse(blobRoot)
|
|
|
|
if err != nil {
|
|
|
|
c.discoErr = fmt.Errorf("client: error resolving blobRoot: %v", err)
|
2012-11-08 04:23:45 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
c.prefixv = strings.TrimRight(u.String(), "/")
|
2013-01-15 13:53:25 +00:00
|
|
|
|
|
|
|
syncHandlers, ok := m["syncHandlers"].([]interface{})
|
|
|
|
if ok {
|
|
|
|
for _, v := range syncHandlers {
|
|
|
|
vmap := v.(map[string]interface{})
|
|
|
|
from := vmap["from"].(string)
|
|
|
|
ufrom, err := root.Parse(from)
|
|
|
|
if err != nil {
|
|
|
|
c.discoErr = fmt.Errorf("client: invalid %q \"from\" sync; failed to resolve", from)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
to := vmap["to"].(string)
|
|
|
|
uto, err := root.Parse(to)
|
|
|
|
if err != nil {
|
|
|
|
c.discoErr = fmt.Errorf("client: invalid %q \"to\" sync; failed to resolve", to)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
c.syncHandlers = append(c.syncHandlers,
|
|
|
|
&SyncInfo{From: ufrom.String(), To: uto.String()})
|
|
|
|
}
|
|
|
|
}
|
2012-11-08 04:23:45 +00:00
|
|
|
}
|
|
|
|
|
2013-01-02 04:23:44 +00:00
|
|
|
// newRequest builds an HTTP request for method and url, rewriting the
// URL via condRewriteURL and adding the client's auth header.
// At most one optional body reader may be given; more than one panics,
// as does an invalid method/url pair (programmer error).
func (c *Client) newRequest(method, url string, body ...io.Reader) *http.Request {
	var bodyR io.Reader
	if len(body) > 0 {
		bodyR = body[0]
	}
	if len(body) > 1 {
		panic("too many body arguments")
	}
	req, err := http.NewRequest(method, c.condRewriteURL(url), bodyR)
	if err != nil {
		panic(err.Error())
	}
	// not done by http.NewRequest in Go 1.0:
	if br, ok := bodyR.(*bytes.Reader); ok {
		req.ContentLength = int64(br.Len())
	}
	c.authMode.AddAuthHeader(req)
	return req
}
|
2012-12-28 17:24:26 +00:00
|
|
|
|
2013-02-03 05:51:08 +00:00
|
|
|
// requestHTTPToken blocks until a request slot is available, bounding
// the number of concurrent in-flight HTTP requests via the reqGate
// channel. Pair each call with releaseHTTPToken.
func (c *Client) requestHTTPToken() {
	c.reqGate <- true
}
|
|
|
|
|
|
|
|
// releaseHTTPToken returns a request slot acquired by
// requestHTTPToken, letting another pending request proceed.
func (c *Client) releaseHTTPToken() {
	<-c.reqGate
}
|
|
|
|
|
|
|
|
// doReqGated performs req with the client's HTTP client, first
// acquiring a concurrency token that is released once the round-trip
// returns. This caps the number of simultaneous requests to the server.
func (c *Client) doReqGated(req *http.Request) (*http.Response, error) {
	c.requestHTTPToken()
	defer c.releaseHTTPToken()
	return c.httpClient.Do(req)
}
|
2013-04-08 13:50:50 +00:00
|
|
|
|
2013-07-15 18:56:44 +00:00
|
|
|
// insecureTLS reports whether the client is using TLS without any
// verification of the server's cert.
func (c *Client) insecureTLS() bool {
	return c.useTLS() && c.InsecureTLS
}
|
|
|
|
|
2013-04-08 13:50:50 +00:00
|
|
|
// selfVerifiedSSL reports whether the client config has fingerprints
// for (self-signed) trusted certificates.
// When true, we run with InsecureSkipVerify and it is our
// responsibility to check the server's cert against our trusted certs.
func (c *Client) selfVerifiedSSL() bool {
	return c.useTLS() && len(c.GetTrustedCerts()) > 0
}
|
|
|
|
|
|
|
|
// condRewriteURL changes "https://" to "http://" if we are in
|
|
|
|
// selfVerifiedSSL mode. We need to do that because we do the TLS
|
|
|
|
// dialing ourselves, and we do not want the http transport layer
|
|
|
|
// to redo it.
|
|
|
|
func (c *Client) condRewriteURL(url string) string {
|
2013-07-15 18:56:44 +00:00
|
|
|
if c.selfVerifiedSSL() || c.insecureTLS() {
|
2013-04-08 13:50:50 +00:00
|
|
|
return strings.Replace(url, "https://", "http://", 1)
|
|
|
|
}
|
|
|
|
return url
|
|
|
|
}
|
|
|
|
|
|
|
|
// TLSConfig returns the correct tls.Config depending on whether
|
|
|
|
// SSL is required, the client's config has some trusted certs,
|
|
|
|
// and we're on android.
|
|
|
|
func (c *Client) TLSConfig() (*tls.Config, error) {
|
|
|
|
if !c.useTLS() {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
trustedCerts := c.GetTrustedCerts()
|
|
|
|
if len(trustedCerts) > 0 {
|
|
|
|
return &tls.Config{InsecureSkipVerify: true}, nil
|
|
|
|
}
|
|
|
|
if !onAndroid() {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return androidTLSConfig()
|
|
|
|
}
|
|
|
|
|
|
|
|
// DialFunc returns the adequate dial function, depending on
|
|
|
|
// whether SSL is required, the client's config has some trusted
|
|
|
|
// certs, and we're on android.
|
|
|
|
// If the client's config has some trusted certs, the server's
|
|
|
|
// certificate will be checked against those in the config after
|
|
|
|
// the TLS handshake.
|
|
|
|
func (c *Client) DialFunc() func(network, addr string) (net.Conn, error) {
|
|
|
|
trustedCerts := c.GetTrustedCerts()
|
2013-07-15 18:56:44 +00:00
|
|
|
if !c.useTLS() || (!c.InsecureTLS && len(trustedCerts) == 0) {
|
2013-04-08 13:50:50 +00:00
|
|
|
// No TLS, or TLS with normal/full verification
|
|
|
|
if onAndroid() {
|
|
|
|
return func(network, addr string) (net.Conn, error) {
|
|
|
|
return androidDial(network, addr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return func(network, addr string) (net.Conn, error) {
|
|
|
|
var conn *tls.Conn
|
|
|
|
var err error
|
|
|
|
if onAndroid() {
|
|
|
|
con, err := androidDial(network, addr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
conn = tls.Client(con, &tls.Config{InsecureSkipVerify: true})
|
|
|
|
if err = conn.Handshake(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
conn, err = tls.Dial(network, addr, &tls.Config{InsecureSkipVerify: true})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2013-07-15 18:56:44 +00:00
|
|
|
if c.InsecureTLS {
|
|
|
|
return conn, nil
|
|
|
|
}
|
2013-04-08 13:50:50 +00:00
|
|
|
certs := conn.ConnectionState().PeerCertificates
|
|
|
|
if certs == nil || len(certs) < 1 {
|
|
|
|
return nil, errors.New("Could not get server's certificate from the TLS connection.")
|
|
|
|
}
|
|
|
|
sig := misc.SHA1Prefix(certs[0].Raw)
|
|
|
|
for _, v := range trustedCerts {
|
|
|
|
if v == sig {
|
|
|
|
return conn, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil, fmt.Errorf("Server's certificate %v is not in the trusted list", sig)
|
|
|
|
}
|
|
|
|
}
|
2013-07-11 06:46:23 +00:00
|
|
|
|
|
|
|
// Sign signs JSON as described in req.
|
|
|
|
// If req's EntityFetcher is nil, the client's entity fetcher is used.
|
|
|
|
// If req's Fetcher is nil, the client is used.
|
|
|
|
func (c *Client) Sign(req *jsonsign.SignRequest) (signedJSON string, err error) {
|
|
|
|
if req.Fetcher == nil {
|
|
|
|
req.Fetcher = c.GetBlobFetcher()
|
|
|
|
}
|
|
|
|
if req.EntityFetcher == nil {
|
|
|
|
req.EntityFetcher = c.SignerEntityFetcher()
|
|
|
|
}
|
|
|
|
return req.Sign()
|
|
|
|
}
|
|
|
|
|
|
|
|
// SignerEntityFetcher returns the client's configured GPG entity fetcher.
// The fetcher is lazily initialized exactly once (via entityFetcherOnce).
func (c *Client) SignerEntityFetcher() jsonsign.EntityFetcher {
	c.entityFetcherOnce.Do(c.initEntityFetcher)
	return c.entityFetcher
}
|
|
|
|
|
|
|
|
func (c *Client) initEntityFetcher() {
|
|
|
|
c.entityFetcher = &jsonsign.CachingEntityFetcher{
|
|
|
|
Fetcher: &jsonsign.FileEntityFetcher{File: c.SecretRingFile()},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// sigTime optionally specifies the signature time.
|
|
|
|
// If zero, the current time is used.
|
|
|
|
func (c *Client) SignBlob(bb schema.Buildable, sigTime time.Time) (string, error) {
|
2013-09-22 18:38:42 +00:00
|
|
|
sigRef := c.SignerPublicKeyBlobref()
|
|
|
|
if !sigRef.Valid() {
|
2013-07-11 06:46:23 +00:00
|
|
|
// TODO: more helpful error message
|
|
|
|
return "", errors.New("No public key configured.")
|
|
|
|
}
|
|
|
|
|
2013-09-22 18:38:42 +00:00
|
|
|
b := bb.Builder().SetSigner(sigRef).Blob()
|
2013-07-11 06:46:23 +00:00
|
|
|
return c.Sign(&jsonsign.SignRequest{
|
|
|
|
UnsignedJSON: b.JSON(),
|
|
|
|
SignatureTime: sigTime,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Client) UploadAndSignBlob(b schema.AnyBlob) (*PutResult, error) {
|
|
|
|
signed, err := c.SignBlob(b.Blob(), time.Time{})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2013-09-22 18:38:42 +00:00
|
|
|
|
|
|
|
// sigRef is guaranteed valid at this point, because SignBlob
|
|
|
|
// succeeded. If we don't know for sure that the server
|
|
|
|
// already has this public key, upload it. And do it serially
|
|
|
|
// so by the time we do the second upload of the signed blob,
|
|
|
|
// any synchronous indexing on the server won't fail due to a
|
|
|
|
// missing public key.
|
|
|
|
sigRef := c.SignerPublicKeyBlobref()
|
|
|
|
if _, keyUploaded := c.haveCache.StatBlobCache(sigRef); !keyUploaded {
|
|
|
|
if _, err := c.uploadString(publicKeyArmored); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-11 06:46:23 +00:00
|
|
|
return c.uploadString(signed)
|
|
|
|
}
|
|
|
|
|
|
|
|
// UploadBlob uploads the JSON serialization of b, unsigned.
func (c *Client) UploadBlob(b schema.AnyBlob) (*PutResult, error) {
	// TODO(bradfitz): ask the blob for its own blobref, rather
	// than changing the hash function with uploadString?
	return c.uploadString(b.Blob().JSON())
}
|
|
|
|
|
|
|
|
// uploadString uploads s as a blob, with the blobref computed by
// NewUploadHandleFromString.
func (c *Client) uploadString(s string) (*PutResult, error) {
	return c.Upload(NewUploadHandleFromString(s))
}
|
|
|
|
|
|
|
|
func (c *Client) UploadNewPermanode() (*PutResult, error) {
|
|
|
|
unsigned := schema.NewUnsignedPermanode()
|
|
|
|
return c.UploadAndSignBlob(unsigned)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Client) UploadPlannedPermanode(key string, sigTime time.Time) (*PutResult, error) {
|
|
|
|
unsigned := schema.NewPlannedPermanode(key)
|
|
|
|
signed, err := c.SignBlob(unsigned, sigTime)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return c.uploadString(signed)
|
|
|
|
}
|
2013-08-19 15:17:35 +00:00
|
|
|
|
|
|
|
// IsIgnoredFile returns whether the file name in fullpath
|
|
|
|
// is in the list of file names that should be ignored when
|
|
|
|
// uploading with camput -filenodes.
|
|
|
|
func (c *Client) IsIgnoredFile(fullpath string) bool {
|
|
|
|
filename := filepath.Base(fullpath)
|
|
|
|
for _, v := range c.getIgnoredFiles() {
|
|
|
|
if filename == v {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|