Merge "Adding Google Drive as a storage backend experimentally. No cheap-enough way of enumerating Drive files, needs to be fixed once Drive supports range queries. Allows you to sync to /sto-googledrive/, but not from."

Brad Fitzpatrick 2013-08-17 15:51:51 +00:00 committed by Gerrit Code Review
commit b0d2a8f0e5
19 changed files with 7615 additions and 24 deletions

View File

@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package google registers the "google" blob storage type, storing blobs
// Package cloudstorage registers the "googlecloudstorage" blob storage type, storing blobs
// on Google Cloud Storage (not Google Drive).
package google
// See https://cloud.google.com/products/cloud-storage
package cloudstorage
import (
"bytes"
@ -140,5 +141,5 @@ func (gs *Storage) GetBlobHub() blobserver.BlobHub {
}
func init() {
blobserver.RegisterStorageConstructor("google", blobserver.StorageConstructor(newFromConfig))
blobserver.RegisterStorageConstructor("googlecloudstorage", blobserver.StorageConstructor(newFromConfig))
}

View File

@ -0,0 +1,88 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package drive registers the "googledrive" blobserver storage
type, storing blobs in a Google Drive folder.
Example low-level config:
"/storage-googledrive/": {
"handler": "storage-googledrive",
"handlerArgs": map[string]interface{}{
"parent_id": parentId,
"auth": map[string]interface{}{
"client_id": clientId,
"client_secret": clientSecret,
"refresh_token": refreshToken,
},
},
},
*/
package drive
import (
"net/http"
"time"
"camlistore.org/pkg/blobserver"
"camlistore.org/pkg/blobserver/google/drive/service"
"camlistore.org/pkg/jsonconfig"
"camlistore.org/third_party/code.google.com/p/goauth2/oauth"
)
const (
GoogleOAuth2AuthURL = "https://accounts.google.com/o/oauth2/auth"
GoogleOAuth2TokenURL = "https://accounts.google.com/o/oauth2/token"
)
type driveStorage struct {
*blobserver.SimpleBlobHubPartitionMap
service *service.DriveService
}
func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
auth := config.RequiredObject("auth")
oauthConf := &oauth.Config{
ClientId: auth.RequiredString("client_id"),
ClientSecret: auth.RequiredString("client_secret"),
AuthURL: GoogleOAuth2AuthURL,
TokenURL: GoogleOAuth2TokenURL,
}
// Force a refresh of the access token on start by supplying an
// already-expired token, so the first request triggers the refresh.
transport := &oauth.Transport{
Token: &oauth.Token{
AccessToken: "",
RefreshToken: auth.RequiredString("refresh_token"),
Expiry: time.Now(),
},
Config: oauthConf,
Transport: http.DefaultTransport,
}
service, err := service.New(transport, config.RequiredString("parent_id"))
sto := &driveStorage{
SimpleBlobHubPartitionMap: &blobserver.SimpleBlobHubPartitionMap{},
service: service,
}
return sto, err
}
func init() {
blobserver.RegisterStorageConstructor("googledrive", blobserver.StorageConstructor(newFromConfig))
}

View File

@ -0,0 +1,34 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drive
import (
"time"
"camlistore.org/pkg/blob"
"camlistore.org/pkg/blobserver"
)
var _ blobserver.MaxEnumerateConfig = (*driveStorage)(nil)
func (sto *driveStorage) MaxEnumerate() int { return 1000 }
func (sto *driveStorage) EnumerateBlobs(dest chan<- blob.SizedRef, after string, limit int, wait time.Duration) error {
defer close(dest)
panic("not implemented")
return nil
}

View File

@ -0,0 +1,27 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drive
import (
"io"
"camlistore.org/pkg/blob"
)
func (sto *driveStorage) FetchStreaming(blob blob.Ref) (file io.ReadCloser, size int64, reterr error) {
return sto.service.Fetch(blob.String())
}

View File

@ -0,0 +1,31 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drive
import (
"io"
"camlistore.org/pkg/blob"
)
func (sto *driveStorage) ReceiveBlob(b blob.Ref, source io.Reader) (blob.SizedRef, error) {
file, err := sto.service.Upsert(b.String(), source)
if err != nil {
return blob.SizedRef{Ref: b, Size: 0}, err
}
return blob.SizedRef{Ref: b, Size: file.FileSize}, err
}

View File

@ -0,0 +1,31 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drive
import (
"camlistore.org/pkg/blob"
)
func (sto *driveStorage) RemoveBlobs(blobs []blob.Ref) error {
var reterr error
for _, blob := range blobs {
if err := sto.service.Trash(blob.String()); err != nil {
reterr = err
}
}
return reterr
}

View File

@ -0,0 +1,138 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// DriveService translates blobserver.Storage methods
// into Google Drive API methods.
package service
import (
"fmt"
"io"
"net/http"
"camlistore.org/third_party/code.google.com/p/goauth2/oauth"
client "camlistore.org/third_party/code.google.com/p/google-api-go-client/drive/v2"
)
const (
MimeTypeDriveFolder = "application/vnd.google-apps.folder"
MimeTypeCamliBlob = "application/vnd.camlistore.blob"
)
// DriveService wraps Google Drive API to implement utility methods to
// be performed on the root Drive destination folder.
type DriveService struct {
transport *oauth.Transport
apiservice *client.Service
parentId string
}
// New creates a new DriveService.
func New(transport *oauth.Transport, parentId string) (*DriveService, error) {
apiservice, err := client.New(transport.Client())
if err != nil {
return nil, err
}
service := &DriveService{transport: transport, apiservice: apiservice, parentId: parentId}
return service, err
}
// Get retrieves the file whose title matches id, or a nil file if none exists.
func (s *DriveService) Get(id string) (*client.File, error) {
req := s.apiservice.Files.List()
// TODO: use field selectors
query := fmt.Sprintf("'%s' in parents and title = '%s'", s.parentId, id)
req.Q(query)
files, err := req.Do()
if err != nil || len(files.Items) < 1 {
return nil, err
}
return files.Items[0], err
}
// List lists the non-folder files contained in the folder identified by parentId.
func (s *DriveService) List(pageToken string, limit int) (files []*client.File, next string, err error) {
req := s.apiservice.Files.List()
req.Q(fmt.Sprintf("'%s' in parents and mimeType != '%s'", s.parentId, MimeTypeDriveFolder))
if pageToken != "" {
req.PageToken(pageToken)
}
if limit > 0 {
req.MaxResults(int64(limit))
}
result, err := req.Do()
if err != nil {
return
}
return result.Items, result.NextPageToken, err
}
// Upsert inserts a file, or updates it if a file with the same title already exists.
func (s *DriveService) Upsert(id string, data io.Reader) (file *client.File, err error) {
if file, err = s.Get(id); err != nil {
return
}
if file == nil {
file = &client.File{Title: id}
file.Parents = []*client.ParentReference{
&client.ParentReference{Id: s.parentId},
}
file.MimeType = MimeTypeCamliBlob
return s.apiservice.Files.Insert(file).Media(data).Do()
}
// TODO: handle large blobs
return s.apiservice.Files.Update(file.Id, file).Media(data).Do()
}
// Fetch retrieves the metadata and contents of a file.
func (s *DriveService) Fetch(id string) (body io.ReadCloser, size int64, err error) {
file, err := s.Get(id)
// TODO: if the file has no download link, consider removing it;
// it is likely malformed or was unintentionally converted to a
// Google Docs file.
if err != nil || file == nil || file.DownloadUrl == "" {
return
}
req, _ := http.NewRequest("GET", file.DownloadUrl, nil)
var resp *http.Response
if resp, err = s.transport.RoundTrip(req); err != nil {
return
}
return resp.Body, file.FileSize, err
}
// Stat looks up the file's metadata and returns its size,
// or 0 (with a nil error) if no such file exists.
func (s *DriveService) Stat(id string) (int64, error) {
file, err := s.Get(id)
if err != nil || file == nil {
return 0, err
}
return file.FileSize, err
}
// Trash trashes an existing file.
func (s *DriveService) Trash(id string) (err error) {
_, err = s.apiservice.Files.Trash(id).Do()
return
}
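
For orientation, here is an illustrative sketch (not part of this commit) of how the DriveService above is driven: build an oauth.Transport the same way the drive storage constructor does, then Upsert a blob and Fetch it back. The credential strings, parent folder ID, and blob contents are placeholders, and error handling is deliberately terse.

package main

import (
    "log"
    "net/http"
    "strings"
    "time"

    "camlistore.org/pkg/blobserver/google/drive/service"
    "camlistore.org/third_party/code.google.com/p/goauth2/oauth"
)

func main() {
    // Placeholder credentials; a real refresh token and Drive folder ID are needed.
    transport := &oauth.Transport{
        Config: &oauth.Config{
            ClientId:     "clientId",
            ClientSecret: "clientSecret",
            AuthURL:      "https://accounts.google.com/o/oauth2/auth",
            TokenURL:     "https://accounts.google.com/o/oauth2/token",
        },
        Token: &oauth.Token{
            RefreshToken: "refreshToken",
            Expiry:       time.Now(), // already expired, so the first use refreshes it
        },
        Transport: http.DefaultTransport,
    }

    svc, err := service.New(transport, "parentFolderId")
    if err != nil {
        log.Fatal(err)
    }

    // Store a blob under its ref string, then read it back.
    const ref = "sha1-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"
    if _, err := svc.Upsert(ref, strings.NewReader("foo")); err != nil {
        log.Fatal(err)
    }
    body, size, err := svc.Fetch(ref)
    if err != nil {
        log.Fatal(err)
    }
    defer body.Close()
    log.Printf("fetched %d bytes", size)
}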

View File

@ -0,0 +1,35 @@
/*
Copyright 2013 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package drive
import (
"time"
"camlistore.org/pkg/blob"
)
func (sto *driveStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref, wait time.Duration) error {
for _, br := range blobs {
size, err := sto.service.Stat(br.String())
if err == nil {
dest <- blob.SizedRef{Ref: br, Size: size}
} else {
return err
}
}
return nil
}

View File

@ -7,7 +7,7 @@ Server configuration
High-level server config is formatted like:
"google": "clientId:clientSecret:refreshToken:bucketName"
"googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName"
Testing

View File

@ -249,10 +249,60 @@ func addS3Config(prefixes jsonconfig.Obj, s3 string) error {
return nil
}
func addGoogleConfig(prefixes jsonconfig.Obj, highCfg string) error {
func addGoogleDriveConfig(prefixes jsonconfig.Obj, highCfg string) error {
f := strings.SplitN(highCfg, ":", 4)
if len(f) != 4 {
return errors.New(`genconfig: expected "google" field to be of form "client_id:client_secret:refresh_token:bucket"`)
return errors.New(`genconfig: expected "googledrive" field to be of form "client_id:client_secret:refresh_token:parent_id"`)
}
clientId, secret, refreshToken, parentId := f[0], f[1], f[2], f[3]
isPrimary := false
if _, ok := prefixes["/bs/"]; !ok {
isPrimary = true
}
prefix := ""
if isPrimary {
prefix = "/bs/"
} else {
prefix = "/sto-googledrive/"
}
prefixes[prefix] = map[string]interface{}{
"handler": "storage-googledrive",
"handlerArgs": map[string]interface{}{
"parent_id": parentId,
"auth": map[string]interface{}{
"client_id": clientId,
"client_secret": secret,
"refresh_token": refreshToken,
},
},
}
if isPrimary {
prefixes["/cache/"] = map[string]interface{}{
"handler": "storage-filesystem",
"handlerArgs": map[string]interface{}{
"path": filepath.Join(tempDir(), "camli-cache"),
},
}
} else {
prefixes["/sync-to-googledrive/"] = map[string]interface{}{
"handler": "sync",
"handlerArgs": map[string]interface{}{
"from": "/bs/",
"to": prefix,
},
}
}
return nil
}
func addGoogleCloudStorageConfig(prefixes jsonconfig.Obj, highCfg string) error {
f := strings.SplitN(highCfg, ":", 4)
if len(f) != 4 {
return errors.New(`genconfig: expected "googlecloudstorage" field to be of form "client_id:client_secret:refresh_token:bucket"`)
}
clientId, secret, refreshToken, bucket := f[0], f[1], f[2], f[3]
@ -265,11 +315,11 @@ func addGoogleConfig(prefixes jsonconfig.Obj, highCfg string) error {
if isPrimary {
gsPrefix = "/bs/"
} else {
gsPrefix = "/sto-google/"
gsPrefix = "/sto-googlecloudstorage/"
}
prefixes[gsPrefix] = map[string]interface{}{
"handler": "storage-google",
"handler": "storage-googlecloudstorage",
"handlerArgs": map[string]interface{}{
"bucket": bucket,
"auth": map[string]interface{}{
@ -293,7 +343,7 @@ func addGoogleConfig(prefixes jsonconfig.Obj, highCfg string) error {
},
}
} else {
prefixes["/sync-to-google/"] = map[string]interface{}{
prefixes["/sync-to-googlecloudstorage/"] = map[string]interface{}{
"handler": "sync",
"handlerArgs": map[string]interface{}{
"from": "/bs/",
@ -426,9 +476,10 @@ func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
tlsKey = conf.OptionalString("HTTPSKeyFile", "")
// Blob storage options
blobPath = conf.OptionalString("blobPath", "")
s3 = conf.OptionalString("s3", "") // "access_key_id:secret_access_key:bucket"
gstorage = conf.OptionalString("google", "") // "clientId:clientSecret:refreshToken:bucket"
blobPath = conf.OptionalString("blobPath", "")
s3 = conf.OptionalString("s3", "") // "access_key_id:secret_access_key:bucket"
googlecloudstorage = conf.OptionalString("googlecloudstorage", "") // "clientId:clientSecret:refreshToken:bucket"
googledrive = conf.OptionalString("googledrive", "") // "clientId:clientSecret:refreshToken:parentId"
// Enable the share handler. If true, and shareHandlerPath is empty,
// then shareHandlerPath defaults to "/share/".
shareHandler = conf.OptionalBool("shareHandler", false)
@ -528,8 +579,8 @@ func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
}
nolocaldisk := blobPath == ""
if nolocaldisk && s3 == "" && gstorage == "" {
return nil, errors.New("You need at least one of blobPath (for localdisk) or s3 or google configured for a blobserver.")
if nolocaldisk && s3 == "" && googlecloudstorage == "" {
return nil, errors.New("You need at least one of blobPath (for localdisk) or s3 or googlecloudstorage configured for a blobserver.")
}
if shareHandler && shareHandlerPath == "" {
@ -592,8 +643,13 @@ func genLowLevelConfig(conf *Config) (lowLevelConf *Config, err error) {
return nil, err
}
}
if gstorage != "" {
if err := addGoogleConfig(prefixes, gstorage); err != nil {
if googledrive != "" {
if err := addGoogleDriveConfig(prefixes, googledrive); err != nil {
return nil, err
}
}
if googlecloudstorage != "" {
if err := addGoogleCloudStorageConfig(prefixes, googlecloudstorage); err != nil {
return nil, err
}
}

View File

@ -120,8 +120,8 @@
}
},
"/sto-google/": {
"handler": "storage-google",
"/sto-googlecloudstorage/": {
"handler": "storage-googlecloudstorage",
"handlerArgs": {
"auth": {
"client_id": "clientId",
@ -132,11 +132,31 @@
}
},
"/sync-to-google/": {
"/sync-to-googlecloudstorage/": {
"handler": "sync",
"handlerArgs": {
"from": "/bs/",
"to": "/sto-google/"
"to": "/sto-googlecloudstorage/"
}
},
"/sto-googledrive/": {
"handler": "storage-googledrive",
"handlerArgs": {
"auth": {
"client_id": "clientId",
"client_secret": "clientSecret",
"refresh_token": "refreshToken"
},
"parent_id": "parentDirId"
}
},
"/sync-to-googledrive/": {
"handler": "sync",
"handlerArgs": {
"from": "/bs/",
"to": "/sto-googledrive/"
}
}

View File

@ -7,7 +7,8 @@
"identitySecretRing": "/path/to/secring",
"memIndex": true,
"s3": "key:secret:bucket",
"google": "clientId:clientSecret:refreshToken:bucketName",
"googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName",
"googledrive": "clientId:clientSecret:refreshToken:parentDirId",
"replicateTo": [],
"publish": {},
"ownerName": "Brad",

View File

@ -74,7 +74,7 @@
},
"/bs/": {
"handler": "storage-google",
"handler": "storage-googlecloudstorage",
"handlerArgs": {
"auth": {
"client_id": "clientId",

View File

@ -5,7 +5,7 @@
"identity": "26F5ABDA",
"identitySecretRing": "/path/to/secring",
"memIndex": true,
"google": "clientId:clientSecret:refreshToken:bucketName",
"googlecloudstorage": "clientId:clientSecret:refreshToken:bucketName",
"replicateTo": [],
"publish": {},
"shareHandler": true

View File

@ -51,7 +51,8 @@ import (
// Storage options:
_ "camlistore.org/pkg/blobserver/cond"
_ "camlistore.org/pkg/blobserver/encrypt"
_ "camlistore.org/pkg/blobserver/google"
_ "camlistore.org/pkg/blobserver/google/cloudstorage"
_ "camlistore.org/pkg/blobserver/google/drive"
_ "camlistore.org/pkg/blobserver/localdisk"
_ "camlistore.org/pkg/blobserver/remote"
_ "camlistore.org/pkg/blobserver/replica"

File diff suppressed because it is too large

View File

@ -0,0 +1,266 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package googleapi contains the common code shared by all Google API
// libraries.
package googleapi
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
"net/url"
"os"
"strings"
)
// ContentTyper is an interface for Readers which know (or would like
// to override) their Content-Type. If a media body doesn't implement
// ContentTyper, the type is sniffed from the content using
// http.DetectContentType.
type ContentTyper interface {
ContentType() string
}
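
As an aside (not in the vendored library code), a minimal sketch of a reader that implements ContentTyper, so callers use its declared type instead of falling back to http.DetectContentType sniffing; typedReader is a made-up name.

package main

import (
    "fmt"
    "strings"
)

// typedReader wraps a reader and reports a fixed Content-Type,
// satisfying the ContentTyper interface above.
type typedReader struct {
    *strings.Reader
    ctype string
}

func (t typedReader) ContentType() string { return t.ctype }

func main() {
    media := typedReader{strings.NewReader("<svg/>"), "image/svg+xml"}
    fmt.Println(media.ContentType()) // image/svg+xml
}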
const Version = "0.5"
type Error struct {
Code int `json:"code"`
Message string `json:"message"`
}
func (e *Error) Error() string {
return fmt.Sprintf("googleapi: Error %d: %s", e.Code, e.Message)
}
type errorReply struct {
Error *Error `json:"error"`
}
func CheckResponse(res *http.Response) error {
if res.StatusCode >= 200 && res.StatusCode <= 299 {
return nil
}
slurp, err := ioutil.ReadAll(res.Body)
if err == nil {
jerr := new(errorReply)
err = json.Unmarshal(slurp, jerr)
if err == nil && jerr.Error != nil {
return jerr.Error
}
}
return fmt.Errorf("googleapi: got HTTP response code %d and error reading body: %v",
res.StatusCode, err)
}
type MarshalStyle bool
var WithDataWrapper = MarshalStyle(true)
var WithoutDataWrapper = MarshalStyle(false)
func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
buf := new(bytes.Buffer)
if wrap {
buf.Write([]byte(`{"data": `))
}
err := json.NewEncoder(buf).Encode(v)
if err != nil {
return nil, err
}
if wrap {
buf.Write([]byte(`}`))
}
return buf, nil
}
func getMediaType(media io.Reader) (io.Reader, string) {
if typer, ok := media.(ContentTyper); ok {
return media, typer.ContentType()
}
typ := "application/octet-stream"
buf := make([]byte, 1024)
n, err := media.Read(buf)
buf = buf[:n]
if err == nil {
typ = http.DetectContentType(buf)
}
return io.MultiReader(bytes.NewBuffer(buf), media), typ
}
type Lengther interface {
Len() int
}
// endingWithErrorReader reads from r until r returns an error. If the
// final error from r is io.EOF and e is non-nil, e is used instead.
type endingWithErrorReader struct {
r io.Reader
e error
}
func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
n, err = er.r.Read(p)
if err == io.EOF && er.e != nil {
err = er.e
}
return
}
func getReaderSize(r io.Reader) (io.Reader, int64) {
// Ideal case, the reader knows its own size.
if lr, ok := r.(Lengther); ok {
return r, int64(lr.Len())
}
// But maybe it's a seeker and we can seek to the end to find its size.
if s, ok := r.(io.Seeker); ok {
pos0, err := s.Seek(0, os.SEEK_CUR)
if err == nil {
posend, err := s.Seek(0, os.SEEK_END)
if err == nil {
_, err = s.Seek(pos0, os.SEEK_SET)
if err == nil {
return r, posend - pos0
} else {
// We moved it forward but can't restore it.
// Seems unlikely, but can't really restore now.
return endingWithErrorReader{strings.NewReader(""), err}, posend - pos0
}
}
}
}
// Otherwise we have to make a copy to calculate how big the reader is.
buf := new(bytes.Buffer)
// TODO(bradfitz): put a cap on this copy? spill to disk after
// a certain point?
_, err := io.Copy(buf, r)
return endingWithErrorReader{buf, err}, int64(buf.Len())
}
func typeHeader(contentType string) textproto.MIMEHeader {
h := make(textproto.MIMEHeader)
h.Set("Content-Type", contentType)
return h
}
// countingWriter counts the number of bytes it receives to write, but
// discards them.
type countingWriter struct {
n *int64
}
func (w countingWriter) Write(p []byte) (int, error) {
*w.n += int64(len(p))
return len(p), nil
}
// ConditionallyIncludeMedia does nothing if media is nil.
//
// bodyp is an in/out parameter. It should initially point to the
// reader of the application/json (or whatever) payload to send in the
// API request. It's updated to point to the multipart body reader.
//
// ctypep is an in/out parameter. It should initially point to the
// content type of the bodyp, usually "application/json". It's updated
// to the "multipart/related" content type, with random boundary.
//
// The return value is the content-length of the entire multipart body.
func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (totalContentLength int64, ok bool) {
if media == nil {
return
}
// Get the media type and size. The type check might return a
// different reader instance, so do the size check first,
// which looks at the specific type of the io.Reader.
var mediaType string
if typer, ok := media.(ContentTyper); ok {
mediaType = typer.ContentType()
}
media, mediaSize := getReaderSize(media)
if mediaType == "" {
media, mediaType = getMediaType(media)
}
body, bodyType := *bodyp, *ctypep
body, bodySize := getReaderSize(body)
// Calculate how big the multipart body will be.
{
totalContentLength = bodySize + mediaSize
mpw := multipart.NewWriter(countingWriter{&totalContentLength})
mpw.CreatePart(typeHeader(bodyType))
mpw.CreatePart(typeHeader(mediaType))
mpw.Close()
}
pr, pw := io.Pipe()
mpw := multipart.NewWriter(pw)
*bodyp = pr
*ctypep = "multipart/related; boundary=" + mpw.Boundary()
go func() {
defer pw.Close()
defer mpw.Close()
w, err := mpw.CreatePart(typeHeader(bodyType))
if err != nil {
return
}
_, err = io.Copy(w, body)
if err != nil {
return
}
w, err = mpw.CreatePart(typeHeader(mediaType))
if err != nil {
return
}
_, err = io.Copy(w, media)
if err != nil {
return
}
}()
return totalContentLength, true
}
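
A hedged usage sketch of the in/out parameters described above: the JSON metadata body and media contents are invented, and the googleapi import path is assumed to sit next to the drive/v2 package vendored in this commit.

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"

    "camlistore.org/third_party/code.google.com/p/google-api-go-client/googleapi"
)

func main() {
    // Start with the JSON metadata payload, as a Drive file insert would.
    var body io.Reader = strings.NewReader(`{"title": "example"}`)
    ctype := "application/json"

    media := strings.NewReader("blob contents")
    length, ok := googleapi.ConditionallyIncludeMedia(media, &body, &ctype)
    if ok {
        // body now streams a multipart/related message, ctype carries the
        // boundary, and length is the total content length to send.
        fmt.Println(ctype, length)
        io.Copy(ioutil.Discard, body)
    }
}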
func ResolveRelative(basestr, relstr string) string {
u, _ := url.Parse(basestr)
rel, _ := url.Parse(relstr)
u = u.ResolveReference(rel)
us := u.String()
us = strings.Replace(us, "%7B", "{", -1)
us = strings.Replace(us, "%7D", "}", -1)
return us
}
// has4860Fix is whether this Go environment contains the fix for
// http://golang.org/issue/4860
var has4860Fix bool
// init initializes has4860Fix by checking the behavior of the net/http package.
func init() {
r := http.Request{
URL: &url.URL{
Scheme: "http",
Opaque: "//opaque",
},
}
b := &bytes.Buffer{}
r.Write(b)
has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
}
// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
// don't alter any hex-escaped characters in u.Path.
func SetOpaque(u *url.URL) {
u.Opaque = "//" + u.Host + u.Path
if !has4860Fix {
u.Opaque = u.Scheme + ":" + u.Opaque
}
}

View File

@ -0,0 +1,101 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package googleapi
import (
"bytes"
"fmt"
"net/http"
"net/url"
"strings"
"testing"
)
type SetOpaqueTest struct {
in *url.URL
wantRequestURI string
}
var setOpaqueTests = []SetOpaqueTest{
// no path
{
&url.URL{
Scheme: "http",
Host: "www.golang.org",
},
"http://www.golang.org",
},
// path
{
&url.URL{
Scheme: "http",
Host: "www.golang.org",
Path: "/",
},
"http://www.golang.org/",
},
// file with hex escaping
{
&url.URL{
Scheme: "https",
Host: "www.golang.org",
Path: "/file%20one&two",
},
"https://www.golang.org/file%20one&two",
},
// query
{
&url.URL{
Scheme: "http",
Host: "www.golang.org",
Path: "/",
RawQuery: "q=go+language",
},
"http://www.golang.org/?q=go+language",
},
// file with hex escaping in path plus query
{
&url.URL{
Scheme: "https",
Host: "www.golang.org",
Path: "/file%20one&two",
RawQuery: "q=go+language",
},
"https://www.golang.org/file%20one&two?q=go+language",
},
// query with hex escaping
{
&url.URL{
Scheme: "http",
Host: "www.golang.org",
Path: "/",
RawQuery: "q=go%20language",
},
"http://www.golang.org/?q=go%20language",
},
}
// prefixTmpl is a template for the expected prefix of the output of writing
// an HTTP request.
const prefixTmpl = "GET %v HTTP/1.1\r\nHost: %v\r\n"
func TestSetOpaque(t *testing.T) {
for _, test := range setOpaqueTests {
u := *test.in
SetOpaque(&u)
w := &bytes.Buffer{}
r := &http.Request{URL: &u}
if err := r.Write(w); err != nil {
t.Errorf("write request: %v", err)
continue
}
prefix := fmt.Sprintf(prefixTmpl, test.wantRequestURI, test.in.Host)
if got := string(w.Bytes()); !strings.HasPrefix(got, prefix) {
t.Errorf("got %q expected prefix %q", got, prefix)
}
}
}

View File

@ -0,0 +1,38 @@
// Copyright 2012 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package transport contains HTTP transports used to make
// authenticated API requests.
package transport
import (
"errors"
"net/http"
)
// APIKey is an HTTP Transport which wraps an underlying transport and
// appends an API Key "key" parameter to the URL of outgoing requests.
type APIKey struct {
// Key is the API Key to set on requests.
Key string
// Transport is the underlying HTTP transport.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
}
func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
rt := t.Transport
if rt == nil {
rt = http.DefaultTransport
if rt == nil {
return nil, errors.New("googleapi/transport: no Transport specified or available")
}
}
newReq := *req
args := newReq.URL.Query()
args.Set("key", t.Key)
newReq.URL.RawQuery = args.Encode()
return rt.RoundTrip(&newReq)
}
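
For context, a small hedged example (not from the library) of plugging this transport into an http.Client; the API key and request URL are placeholders, and the vendored import path is inferred from the drive/v2 path used elsewhere in this commit.

package main

import (
    "log"
    "net/http"

    "camlistore.org/third_party/code.google.com/p/google-api-go-client/googleapi/transport"
)

func main() {
    // Every request sent through this client gets ?key=... appended to its URL.
    client := &http.Client{
        Transport: &transport.APIKey{Key: "YOUR_API_KEY"},
    }
    resp, err := client.Get("https://www.googleapis.com/drive/v2/files")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    log.Println(resp.Status)
}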