diff --git a/dev/devcam/env.go b/dev/devcam/env.go
index 8038629ad..6ec27594f 100644
--- a/dev/devcam/env.go
+++ b/dev/devcam/env.go
@@ -155,7 +155,7 @@ func setCamdevVarsFor(e *Env, altkey bool) {
 }
 
 func (e *Env) wipeCacheDir() {
-	cacheDir, _ := e.m["CAMLI_CACHE_DIR"]
+	cacheDir := e.m["CAMLI_CACHE_DIR"]
 	if cacheDir == "" {
 		log.Fatal("Could not wipe cache dir, CAMLI_CACHE_DIR not defined")
 	}
diff --git a/dev/devcam/hook.go b/dev/devcam/hook.go
index ffad8ece3..efcd8bceb 100644
--- a/dev/devcam/hook.go
+++ b/dev/devcam/hook.go
@@ -201,9 +201,7 @@ func (c *hookCmd) runGofmt() (files []string, err error) {
 	// TODO(mpl): it would be nice to TrimPrefix the pwd from each file to get a shorter output.
 	// However, since git sets the pwd to GIT_DIR before running the pre-commit hook, we lost
 	// the actual pwd from when we ran `git commit`, so no dice so far.
-	for _, file := range indexFiles {
-		args = append(args, file)
-	}
+	args = append(args, indexFiles...)
 
 	if c.verbose {
 		fmt.Fprintln(cmdmain.Stderr, commandString("gofmt", args))
diff --git a/internal/azure/storage/client.go b/internal/azure/storage/client.go
index 8ef89f2c4..a7dc07431 100644
--- a/internal/azure/storage/client.go
+++ b/internal/azure/storage/client.go
@@ -244,9 +244,8 @@ func (c *Client) ListBlobs(ctx context.Context, container string, maxResults int
 			return nil, err
 		}
 
-		for _, it := range bres.Blobs.Blob {
-			blobs = append(blobs, it)
-		}
+		blobs = append(blobs, bres.Blobs.Blob...)
+
 		if bres.NextMarker == "" {
 			// No more blobs to list
 			break
diff --git a/internal/chanworker/chanworker.go b/internal/chanworker/chanworker.go
index e5acac237..a5cdb7d05 100644
--- a/internal/chanworker/chanworker.go
+++ b/internal/chanworker/chanworker.go
@@ -107,14 +107,8 @@ func (w *chanWorker) pump() {
 }
 
 func (w *chanWorker) work() {
-	for {
-		select {
-		case n, ok := <-w.workc:
-			if !ok {
-				w.donec <- true
-				return
-			}
-			w.fn(n, true)
-		}
+	for n := range w.workc {
+		w.fn(n, true)
 	}
+	w.donec <- true
 }
diff --git a/internal/httputil/certs.go b/internal/httputil/certs.go
index 5af5a0ea4..59b6a0147 100644
--- a/internal/httputil/certs.go
+++ b/internal/httputil/certs.go
@@ -85,17 +85,17 @@ func GenSelfTLS(hostname string) (certPEM, keyPEM []byte, err error) {
 	if err != nil {
 		return certPEM, keyPEM, fmt.Errorf("failed to create certificate: %s", err)
 	}
-	var buf bytes.Buffer
-	if err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
+	var certBuf bytes.Buffer
+	if err := pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
 		return certPEM, keyPEM, fmt.Errorf("error writing self-signed HTTPS cert: %v", err)
 	}
-	certPEM = []byte(buf.String())
+	certPEM = certBuf.Bytes()
 
-	buf.Reset()
-	if err := pem.Encode(&buf, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
+	var keyBuf bytes.Buffer
+	if err := pem.Encode(&keyBuf, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
 		return certPEM, keyPEM, fmt.Errorf("error writing self-signed HTTPS private key: %v", err)
 	}
-	keyPEM = buf.Bytes()
+	keyPEM = keyBuf.Bytes()
 
 	return certPEM, keyPEM, nil
 }
diff --git a/internal/images/resize/resize_test.go b/internal/images/resize/resize_test.go
index 39cddb4a3..7713be112 100644
--- a/internal/images/resize/resize_test.go
+++ b/internal/images/resize/resize_test.go
@@ -228,6 +228,9 @@ func init() {
 	}
 	defer r.Close()
 	testIm, err = png.Decode(r)
+	if err != nil {
+		panic(err)
+	}
 }
 
 func fillTestImage(im image.Image) {
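Note on the chanworker.work rewrite above: ranging over a channel exits the loop once the sender closes it, so the completion signal can move after the loop instead of living inside a select. A minimal standalone sketch of the same idiom; the worker and producer names here are illustrative, not from the package:

	package main

	import "fmt"

	// worker drains workc until the producer closes it, then signals donec once.
	func worker(workc <-chan int, donec chan<- bool) {
		for n := range workc {
			fmt.Println("processing", n)
		}
		donec <- true
	}

	func main() {
		workc := make(chan int)
		donec := make(chan bool)
		go worker(workc, donec)
		for i := 0; i < 3; i++ {
			workc <- i
		}
		close(workc)
		<-donec
	}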
diff --git a/internal/osutil/restart_unix.go b/internal/osutil/restart_unix.go
index 84e69559d..1ea64ba60 100644
--- a/internal/osutil/restart_unix.go
+++ b/internal/osutil/restart_unix.go
@@ -68,9 +68,7 @@ func RestartProcess(arg ...string) error {
 	var args []string
 	if len(arg) > 0 {
 		args = append(args, os.Args[0])
-		for _, v := range arg {
-			args = append(args, v)
-		}
+		args = append(args, arg...)
 	} else {
 		args = os.Args
 	}
diff --git a/misc/release/make-release.go b/misc/release/make-release.go
index cbe843bb4..431409ae8 100644
--- a/misc/release/make-release.go
+++ b/misc/release/make-release.go
@@ -729,8 +729,8 @@ func committers() (map[string]string, error) {
 			committerByName[name] = email
 			continue
 		}
-		c1, _ := commitCountByEmail[firstEmail]
-		c2, _ := commitCountByEmail[email]
+		c1 := commitCountByEmail[firstEmail]
+		c2 := commitCountByEmail[email]
 		if c1 < c2 {
 			delete(committers, firstEmail)
 		} else {
diff --git a/pkg/blobserver/azure/azure.go b/pkg/blobserver/azure/azure.go
index 6d48517aa..ad0a67bb6 100644
--- a/pkg/blobserver/azure/azure.go
+++ b/pkg/blobserver/azure/azure.go
@@ -57,8 +57,8 @@ type azureStorage struct {
 	cache *memory.Storage // or nil for no cache
 }
 
-func (s *azureStorage) String() string {
-	return fmt.Sprintf("\"azure\" blob storage at host %q, container %q", s.hostname, s.container)
+func (sto *azureStorage) String() string {
+	return fmt.Sprintf("\"azure\" blob storage at host %q, container %q", sto.hostname, sto.container)
 }
 
 func newFromConfig(_ blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
diff --git a/pkg/blobserver/blobpacked/blobpacked.go b/pkg/blobserver/blobpacked/blobpacked.go
index b6f293ff5..84a89b429 100644
--- a/pkg/blobserver/blobpacked/blobpacked.go
+++ b/pkg/blobserver/blobpacked/blobpacked.go
@@ -236,7 +236,7 @@ var (
 )
 
 func (s *storage) String() string {
-	return fmt.Sprintf("\"blobpacked\" storage")
+	return `"blobpacked" storage`
 }
 
 func (s *storage) Logf(format string, args ...interface{}) {
@@ -1337,11 +1337,9 @@ func (pk *packer) writeAZip(ctx context.Context, trunc blob.Ref) (err error) {
 		return fmt.Errorf("File schema blob %v filename had a slash in it: %q", pk.fr.SchemaBlobRef(), baseFileName)
 	}
 	fh := &zip.FileHeader{
-		Name:   baseFileName,
-		Method: zip.Store, // uncompressed
-	}
-	if m := pk.fr.ModTime(); !m.IsZero() {
-		fh.SetModTime(m)
+		Name:     baseFileName,
+		Method:   zip.Store, // uncompressed
+		Modified: pk.fr.ModTime(),
 	}
 	fh.SetMode(0644)
 	fw, err := zw.CreateHeader(fh)
diff --git a/pkg/blobserver/blobpacked/subfetch_test.go b/pkg/blobserver/blobpacked/subfetch_test.go
index d7e637401..d76e157aa 100644
--- a/pkg/blobserver/blobpacked/subfetch_test.go
+++ b/pkg/blobserver/blobpacked/subfetch_test.go
@@ -49,7 +49,7 @@ func TestCapOffsetLength(t *testing.T) {
 		if gotErr != tt.wantErr || gotLen != tt.wantLen {
 			var want string
 			if tt.wantErr {
-				want = fmt.Sprintf("some error")
+				want = "some error"
 			} else {
 				want = fmt.Sprintf("length %d, no error", tt.wantLen)
 			}
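Note on the blobpacked writeAZip hunk above: it drops the deprecated FileHeader.SetModTime call in favor of the Modified field that archive/zip has offered since Go 1.10. A small self-contained sketch of writing one stored (uncompressed) entry that way; the buffer, file name, and payload are made up for illustration:

	package main

	import (
		"archive/zip"
		"bytes"
		"log"
		"time"
	)

	func main() {
		var buf bytes.Buffer
		zw := zip.NewWriter(&buf)
		fh := &zip.FileHeader{
			Name:     "example.txt",
			Method:   zip.Store, // uncompressed
			Modified: time.Now(),
		}
		fh.SetMode(0644)
		fw, err := zw.CreateHeader(fh)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := fw.Write([]byte("hello")); err != nil {
			log.Fatal(err)
		}
		if err := zw.Close(); err != nil {
			log.Fatal(err)
		}
		log.Printf("wrote a %d-byte archive", buf.Len())
	}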
diff --git a/pkg/blobserver/files/files.go b/pkg/blobserver/files/files.go
index be8c221f3..4a3a37ed2 100644
--- a/pkg/blobserver/files/files.go
+++ b/pkg/blobserver/files/files.go
@@ -98,7 +98,7 @@ type Storage struct {
 
 // SetNewFileGate sets a gate (counting semaphore) on the number of new files
 // that may be opened for writing at a time.
-func (s *Storage) SetNewFileGate(g *syncutil.Gate) { s.tmpFileGate = g }
+func (ds *Storage) SetNewFileGate(g *syncutil.Gate) { ds.tmpFileGate = g }
 
 func NewStorage(fs VFS, root string) *Storage {
 	return &Storage{
diff --git a/pkg/blobserver/gethandler/get.go b/pkg/blobserver/gethandler/get.go
index 6629458ab..42f2a7454 100644
--- a/pkg/blobserver/gethandler/get.go
+++ b/pkg/blobserver/gethandler/get.go
@@ -84,7 +84,7 @@ func ServeBlobRef(rw http.ResponseWriter, req *http.Request, blobRef blob.Ref, f
 	rw.Header().Set("Content-Type", "application/octet-stream")
 	rw.Header().Set("Cache-Control", fmt.Sprintf("max-age=%d, immutable", int(HTTP_CACHE_DURATION.Seconds())))
 
-	var content io.ReadSeeker = readerutil.NewFakeSeeker(rc, int64(size))
+	var content = readerutil.NewFakeSeeker(rc, int64(size))
 	rangeHeader := req.Header.Get("Range") != ""
 	const small = 32 << 10
 	var b *blob.Blob
diff --git a/pkg/blobserver/google/cloudstorage/storage.go b/pkg/blobserver/google/cloudstorage/storage.go
index 7efaf119d..361f648dd 100644
--- a/pkg/blobserver/google/cloudstorage/storage.go
+++ b/pkg/blobserver/google/cloudstorage/storage.go
@@ -253,11 +253,11 @@ func (s *Storage) Fetch(ctx context.Context, br blob.Ref) (rc io.ReadCloser, siz
 	if err != nil {
 		return nil, 0, err
 	}
-	if r.Size() >= 1<<32 {
+	if r.Attrs.Size >= 1<<32 {
 		r.Close()
 		return nil, 0, errors.New("object larger than a uint32")
 	}
-	size = uint32(r.Size())
+	size = uint32(r.Attrs.Size)
 	if size > constants.MaxBlobSize {
 		r.Close()
 		return nil, size, errors.New("object too big")
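Note on the cloudstorage Fetch hunk above: newer releases of cloud.google.com/go/storage expose the object size as Reader.Attrs.Size rather than through the reader's deprecated Size method. A rough sketch of the same size guard; the bucket and object names are placeholders and error handling is kept minimal:

	package gcsexample

	import (
		"context"
		"errors"
		"io"

		"cloud.google.com/go/storage"
	)

	// readSmallObject opens an object and rejects anything whose size does not
	// fit in a uint32, mirroring the check in the Fetch hunk above.
	func readSmallObject(ctx context.Context, client *storage.Client, w io.Writer) error {
		r, err := client.Bucket("example-bucket").Object("example-object").NewReader(ctx)
		if err != nil {
			return err
		}
		defer r.Close()
		if r.Attrs.Size >= 1<<32 {
			return errors.New("object larger than a uint32")
		}
		_, err = io.Copy(w, r)
		return err
	}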
diff --git a/pkg/blobserver/google/drive/service/service.go b/pkg/blobserver/google/drive/service/service.go
index 494a470ef..c99db6a99 100644
--- a/pkg/blobserver/google/drive/service/service.go
+++ b/pkg/blobserver/google/drive/service/service.go
@@ -28,6 +28,7 @@ import (
 	"os"
 
 	client "google.golang.org/api/drive/v2"
+	"google.golang.org/api/option"
 )
 
 const (
@@ -48,7 +49,7 @@ type DriveService struct {
 // DriveService (such as Get). If empty, it defaults to the root of the
 // drive.
 func New(oauthClient *http.Client, parentID string) (*DriveService, error) {
-	apiservice, err := client.New(oauthClient)
+	apiservice, err := client.NewService(context.TODO(), option.WithHTTPClient(oauthClient))
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/blobserver/handlers/enumerate.go b/pkg/blobserver/handlers/enumerate.go
index 42112e337..246535c24 100644
--- a/pkg/blobserver/handlers/enumerate.go
+++ b/pkg/blobserver/handlers/enumerate.go
@@ -66,7 +66,7 @@ func handleEnumerateBlobs(rw http.ResponseWriter, req *http.Request, storage blo
 		waitSeconds, _ = strconv.Atoi(formValueMaxWaitSec)
 		if waitSeconds != 0 && formValueAfter != "" {
 			rw.WriteHeader(http.StatusBadRequest)
-			fmt.Fprintf(rw, errMsgMaxWaitSecWithAfter)
+			fmt.Fprint(rw, errMsgMaxWaitSecWithAfter)
 			return
 		}
 		switch {
diff --git a/pkg/blobserver/localdisk/localdisk.go b/pkg/blobserver/localdisk/localdisk.go
index 0164b4263..356640173 100644
--- a/pkg/blobserver/localdisk/localdisk.go
+++ b/pkg/blobserver/localdisk/localdisk.go
@@ -188,7 +188,7 @@ func (ds *DiskStorage) checkFS() (ret error) {
 	if err != nil {
 		return fmt.Errorf("localdisk check: unable to read from %s, err=%v", tempfile, err)
 	}
-	if bytes.Compare(out, data) != 0 {
+	if !bytes.Equal(out, data) {
 		return fmt.Errorf("localdisk check: tempfile contents didn't match, got=%q", out)
 	}
 	if _, err := os.Lstat(filename); !os.IsNotExist(err) {
diff --git a/pkg/blobserver/s3/s3.go b/pkg/blobserver/s3/s3.go
index 5cd4093c2..1a70ed485 100644
--- a/pkg/blobserver/s3/s3.go
+++ b/pkg/blobserver/s3/s3.go
@@ -82,11 +82,11 @@ type s3Storage struct {
 	hostname string
 }
 
-func (s *s3Storage) String() string {
-	if s.dirPrefix != "" {
-		return fmt.Sprintf("\"S3\" blob storage at host %q, bucket %q, directory %q", s.hostname, s.bucket, s.dirPrefix)
+func (sto *s3Storage) String() string {
+	if sto.dirPrefix != "" {
+		return fmt.Sprintf("\"S3\" blob storage at host %q, bucket %q, directory %q", sto.hostname, sto.bucket, sto.dirPrefix)
 	}
-	return fmt.Sprintf("\"S3\" blob storage at host %q, bucket %q", s.hostname, s.bucket)
+	return fmt.Sprintf("\"S3\" blob storage at host %q, bucket %q", sto.hostname, sto.bucket)
 }
 
 func newFromConfig(l blobserver.Loader, config jsonconfig.Obj) (blobserver.Storage, error) {
@@ -116,7 +116,10 @@ func newFromConfigWithTransport(_ blobserver.Loader, config jsonconfig.Obj, tran
 		httpClient.Transport = transport
 		s3Cfg.WithHTTPClient(&httpClient)
 	}
-	awsSession := session.New(s3Cfg)
+	awsSession, err := session.NewSession(s3Cfg)
+	if err != nil {
+		return nil, err
+	}
 
 	bucket := config.RequiredString("bucket")
 	var dirPrefix string
diff --git a/pkg/blobserver/s3/s3_preflight.go b/pkg/blobserver/s3/s3_preflight.go
index ef80ed720..b25636aeb 100644
--- a/pkg/blobserver/s3/s3_preflight.go
+++ b/pkg/blobserver/s3/s3_preflight.go
@@ -182,7 +182,7 @@ func determineEndpoint(ctx context.Context, svc s3iface.S3API, endpoint, bucket,
 func endpointIsOfficial(endpoint string) (bool, string, error) {
 	for _, partition := range endpoints.DefaultPartitions() {
 		for _, region := range partition.Regions() {
-			s3Endpoint, err := region.ResolveEndpoint(endpoints.S3ServiceID)
+			s3Endpoint, err := region.ResolveEndpoint(endpoints.S3ServiceID) //lint:ignore SA1019 TODO fix this and caller
 			if err != nil {
 				// S3 isn't available in this region yet; unlikely to ever happen
 				continue
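Note on the s3.go hunk above: session.New silently swallows configuration errors, while session.NewSession returns them, which is why the error check is added. A short sketch of the error-checked construction with the aws-sdk-go (v1) API; the region is a placeholder:

	package main

	import (
		"log"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func main() {
		cfg := aws.NewConfig().WithRegion("us-east-1") // placeholder region
		sess, err := session.NewSession(cfg)
		if err != nil {
			log.Fatalf("configuring AWS session: %v", err)
		}
		svc := s3.New(sess)
		_ = svc // svc is now ready for S3 calls such as ListBuckets
	}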
diff --git a/pkg/blobserver/stat.go b/pkg/blobserver/stat.go
index 41717a40a..458922369 100644
--- a/pkg/blobserver/stat.go
+++ b/pkg/blobserver/stat.go
@@ -74,6 +74,7 @@ func StatBlobsParallelHelper(ctx context.Context, blobs []blob.Ref, fn func(blob
 	var fnMu sync.Mutex // serializes calls to fn
 
 	var wg syncutil.Group
+Blobs:
 	for i := range blobs {
 		gate.Start()
 		b := blobs[i]
@@ -81,7 +82,7 @@ func StatBlobsParallelHelper(ctx context.Context, blobs []blob.Ref, fn func(blob
 		select {
 		case <-ctx.Done():
 			// If a previous failed, stop.
-			break
+			break Blobs
 		default:
 		}
 
diff --git a/pkg/importer/oauth.go b/pkg/importer/oauth.go
index c0697b580..f44051342 100644
--- a/pkg/importer/oauth.go
+++ b/pkg/importer/oauth.go
@@ -222,8 +222,8 @@ type OAuthURIs struct {
 
 // NewOAuthClient returns an oauth Client configured with uris and the
 // credentials obtained from ctx.
-func (ctx *SetupContext) NewOAuthClient(uris OAuthURIs) (*oauth.Client, error) {
-	clientID, secret, err := ctx.Credentials()
+func (sc *SetupContext) NewOAuthClient(uris OAuthURIs) (*oauth.Client, error) {
+	clientID, secret, err := sc.Credentials()
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/importer/picasa/picasa.go b/pkg/importer/picasa/picasa.go
index b154b4d7c..075c474ad 100644
--- a/pkg/importer/picasa/picasa.go
+++ b/pkg/importer/picasa/picasa.go
@@ -324,6 +324,9 @@ func (r *run) importAlbums(ctx context.Context) error {
 		return fmt.Errorf("importAlbums: error listing albums: %v", err)
 	}
 	albumsNode, err := r.getTopLevelNode("albums", "Albums")
+	if err != nil {
+		return fmt.Errorf("getting picasa top-level Albums node: %w", err)
+	}
 	for _, album := range albums {
 		select {
 		case <-ctx.Done():
diff --git a/pkg/index/corpus.go b/pkg/index/corpus.go
index 396d13c1f..08861c04b 100644
--- a/pkg/index/corpus.go
+++ b/pkg/index/corpus.go
@@ -1162,7 +1162,7 @@ func (c *Corpus) PermanodeTime(pn blob.Ref) (t time.Time, ok bool) {
 	var fi camtypes.FileInfo
 	ccRef, ccTime, ok := c.pnCamliContent(pn)
 	if ok {
-		fi, _ = c.files[ccRef]
+		fi = c.files[ccRef]
 	}
 	if fi.Time != nil {
 		return time.Time(*fi.Time), true
diff --git a/staticcheck.conf b/staticcheck.conf
index 1c1531e54..85f9707ee 100644
--- a/staticcheck.conf
+++ b/staticcheck.conf
@@ -3,4 +3,10 @@ checks = ["all",
 	"-ST1005",
 	"-ST1003",
 	"-ST1019",
+
+	"-ST1016",
+
+	"-ST1000",
+	"-ST1020",
+	"-ST1021",
 ]
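Note on the stat.go hunks above: inside a select, a bare break only leaves the select statement, not the enclosing for loop, so the old code kept launching stat work after the context was canceled; the added Blobs label fixes that. A minimal standalone sketch of the labeled-break pattern; the loop body and timings are made up:

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
		defer cancel()

	Items:
		for i := 0; i < 10; i++ {
			select {
			case <-ctx.Done():
				break Items // a plain break here would only leave the select
			default:
			}
			fmt.Println("processing item", i)
			time.Sleep(20 * time.Millisecond)
		}
	}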