Developer option: extra blob paths (#4566)

* Allow additional read-only blob paths
* Add developer option to add more blob sources
* Add makefile targets to start and remove build container
* Documentation
WithoutPants 2024-02-16 12:39:45 +11:00 committed by GitHub
parent 8fc997dfe9
commit 440c261f5b
6 changed files with 144 additions and 65 deletions

View File

@@ -48,6 +48,11 @@ GO_BUILD_TAGS += sqlite_stat4 sqlite_math_functions
export CGO_ENABLED := 1
# define COMPILER_IMAGE for cross-compilation docker container
ifndef COMPILER_IMAGE
COMPILER_IMAGE := stashapp/compiler:latest
endif
.PHONY: release
release: pre-ui generate ui build-release
@@ -378,3 +383,16 @@ docker-build: build-info
.PHONY: docker-cuda-build
docker-cuda-build: build-info
docker build --build-arg GITHASH=$(GITHASH) --build-arg STASH_VERSION=$(STASH_VERSION) -t stash/cuda-build -f docker/build/x86_64/Dockerfile-CUDA .
# start the build container - for cross compilation
# this is adapted from the github actions build.yml file
.PHONY: start-compiler-container
start-compiler-container:
docker run -d --name build --mount type=bind,source="$(PWD)",target=/stash,consistency=delegated $(EXTRA_CONTAINER_ARGS) -w /stash $(COMPILER_IMAGE) tail -f /dev/null
# run the cross-compilation using
# docker exec -t build /bin/bash -c "make build-cc-<platform>"
.PHONY: remove-compiler-container
remove-compiler-container:
docker rm -f -v build

View File

@@ -260,6 +260,9 @@ const (
// File upload options
MaxUploadSize = "max_upload_size"
// Developer options
ExtraBlobsPaths = "developer_options.extra_blob_paths"
)
// slice default values
@@ -561,6 +564,12 @@ func (i *Config) GetBlobsPath() string {
return i.getString(BlobsPath)
}
// GetExtraBlobsPaths returns extra blobs paths.
// For developer/advanced use only.
func (i *Config) GetExtraBlobsPaths() []string {
return i.getStringSlice(ExtraBlobsPaths)
}
func (i *Config) GetBlobsStorage() BlobsStorageType {
ret := BlobsStorageType(i.getString(BlobsStorage))
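
The new GetExtraBlobsPaths getter simply surfaces whatever the config file contains; nothing in this change validates the extra paths. As a purely hypothetical illustration (this helper is not part of the commit and its name is made up), a startup sanity check could look like:

```go
package config

import (
	"fmt"
	"os"
)

// validateExtraBlobsPaths is a hypothetical helper: it checks that each
// configured extra blob path exists and is a directory, so that a typo does
// not silently turn into blob read misses later.
func validateExtraBlobsPaths(paths []string) error {
	for _, p := range paths {
		info, err := os.Stat(p)
		if err != nil {
			return fmt.Errorf("extra blob path %q: %w", p, err)
		}
		if !info.IsDir() {
			return fmt.Errorf("extra blob path %q is not a directory", p)
		}
	}
	return nil
}
```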

View File

@@ -75,11 +75,13 @@ func GetInstance() *Manager {
func (s *Manager) SetBlobStoreOptions() {
storageType := s.Config.GetBlobsStorage()
blobsPath := s.Config.GetBlobsPath()
extraBlobsPaths := s.Config.GetExtraBlobsPaths()
s.Database.SetBlobStoreOptions(sqlite.BlobStoreOptions{
UseFilesystem: storageType == config.BlobStorageTypeFilesystem,
UseDatabase: storageType == config.BlobStorageTypeDatabase,
Path: blobsPath,
UseFilesystem: storageType == config.BlobStorageTypeFilesystem,
UseDatabase: storageType == config.BlobStorageTypeDatabase,
Path: blobsPath,
SupplementaryPaths: extraBlobsPaths,
})
}

View File

@@ -31,6 +31,9 @@ type BlobStoreOptions struct {
UseDatabase bool
// Path is the filesystem path to use for storing blobs
Path string
// SupplementaryPaths are alternative filesystem paths that will be used to find blobs
// No changes will be made to these filesystems
SupplementaryPaths []string
}
type BlobStore struct {
@@ -39,11 +42,15 @@ type BlobStore struct {
tableMgr *table
fsStore *blob.FilesystemStore
options BlobStoreOptions
// supplementary stores
otherStores []blob.FilesystemReader
options BlobStoreOptions
}
func NewBlobStore(options BlobStoreOptions) *BlobStore {
return &BlobStore{
fs := &file.OsFS{}
ret := &BlobStore{
repository: repository{
tableName: blobTable,
idColumn: blobChecksumColumn,
@@ -51,9 +58,15 @@ func NewBlobStore(options BlobStoreOptions) *BlobStore {
tableMgr: blobTableMgr,
fsStore: blob.NewFilesystemStore(options.Path, &file.OsFS{}),
fsStore: blob.NewFilesystemStore(options.Path, fs),
options: options,
}
for _, otherPath := range options.SupplementaryPaths {
ret.otherStores = append(ret.otherStores, *blob.NewReadonlyFilesystemStore(otherPath, fs))
}
return ret
}
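
Taken together, the options struct and constructor give the blob store one writable primary path plus any number of read-only fallbacks. A minimal sketch of the resulting API, with made-up paths and an import path assumed from the repository layout (in stash itself this is wired up through Manager.SetBlobStoreOptions and Database.SetBlobStoreOptions rather than called directly):

```go
package main

import "github.com/stashapp/stash/pkg/sqlite" // assumed import path

func main() {
	// Path is read/write; SupplementaryPaths are only ever read from.
	store := sqlite.NewBlobStore(sqlite.BlobStoreOptions{
		UseFilesystem:      true,
		Path:               "/data/stash/blobs",                         // illustrative
		SupplementaryPaths: []string{"/mnt/nas/blobs", "/backup/blobs"}, // illustrative
	})
	_ = store
}
```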
type blobRow struct {
@@ -188,14 +201,12 @@ func (qb *BlobStore) readSQL(ctx context.Context, querySQL string, args ...inter
// don't use the filesystem if not configured to do so
if qb.options.UseFilesystem {
ret, err := qb.fsStore.Read(ctx, checksum)
if err == nil {
return ret, checksum, nil
ret, err := qb.readFromFilesystem(ctx, checksum)
if err != nil {
return nil, checksum, err
}
if !errors.Is(err, fs.ErrNotExist) {
return nil, checksum, fmt.Errorf("reading from filesystem: %w", err)
}
return ret, checksum, nil
}
return nil, checksum, &ChecksumBlobNotExistError{
@@ -203,6 +214,27 @@ func (qb *BlobStore) readSQL(ctx context.Context, querySQL string, args ...inter
}
}
func (qb *BlobStore) readFromFilesystem(ctx context.Context, checksum string) ([]byte, error) {
// try to read from primary store first, then supplementaries
fsStores := append([]blob.FilesystemReader{qb.fsStore.FilesystemReader}, qb.otherStores...)
for _, fsStore := range fsStores {
ret, err := fsStore.Read(ctx, checksum)
if err == nil {
return ret, nil
}
if !errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("reading from filesystem: %w", err)
}
}
// blob not found - should not happen
return nil, &ChecksumBlobNotExistError{
Checksum: checksum,
}
}
// Read reads the data from the database or filesystem, depending on which is enabled.
func (qb *BlobStore) Read(ctx context.Context, checksum string) ([]byte, error) {
if !qb.options.UseDatabase && !qb.options.UseFilesystem {
@@ -228,14 +260,7 @@ func (qb *BlobStore) Read(ctx context.Context, checksum string) ([]byte, error)
// don't use the filesystem if not configured to do so
if qb.options.UseFilesystem {
ret, err := qb.fsStore.Read(ctx, checksum)
if err == nil {
return ret, nil
}
if !errors.Is(err, fs.ErrNotExist) {
return nil, fmt.Errorf("reading from filesystem: %w", err)
}
return qb.readFromFilesystem(ctx, checksum)
}
// blob not found - should not happen
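
For callers, the observable behaviour is unchanged except that a filesystem read now consults every configured path before giving up. A hedged caller-side sketch (not stash code; it assumes the exported ChecksumBlobNotExistError is returned by pointer, as the diff suggests, and the import path follows the repository layout):

```go
package example

import (
	"context"
	"errors"

	"github.com/stashapp/stash/pkg/sqlite" // assumed import path
)

// readBlob is an illustrative wrapper distinguishing "missing from the
// primary and all supplementary paths" from a real database/filesystem error.
func readBlob(ctx context.Context, store *sqlite.BlobStore, checksum string) ([]byte, bool, error) {
	data, err := store.Read(ctx, checksum)
	if err == nil {
		return data, true, nil
	}

	var notExist *sqlite.ChecksumBlobNotExistError
	if errors.As(err, &notExist) {
		// not present in the primary path or any supplementary path
		return nil, false, nil
	}
	// genuine database or filesystem failure
	return nil, false, err
}
```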

View File

@@ -19,65 +19,34 @@ const (
blobsDirLength int = 2 // thumbDirDepth * thumbDirLength must be smaller than the length of checksum
)
type FS interface {
type FSReader interface {
Open(name string) (fs.ReadDirFile, error)
}
type FSWriter interface {
Create(name string) (*os.File, error)
MkdirAll(path string, perm fs.FileMode) error
Open(name string) (fs.ReadDirFile, error)
Remove(name string) error
file.RenamerRemover
}
type FilesystemStore struct {
deleter *file.Deleter
path string
fs FS
type FS interface {
FSReader
FSWriter
}
func NewFilesystemStore(path string, fs FS) *FilesystemStore {
deleter := &file.Deleter{
RenamerRemover: fs,
}
return &FilesystemStore{
deleter: deleter,
path: path,
fs: fs,
}
type FilesystemReader struct {
path string
fs FSReader
}
func (s *FilesystemStore) checksumToPath(checksum string) string {
func (s *FilesystemReader) checksumToPath(checksum string) string {
return filepath.Join(s.path, fsutil.GetIntraDir(checksum, blobsDirDepth, blobsDirLength), checksum)
}
func (s *FilesystemStore) Write(ctx context.Context, checksum string, data []byte) error {
if s.path == "" {
return fmt.Errorf("no path set")
}
fn := s.checksumToPath(checksum)
// create the directory if it doesn't exist
if err := s.fs.MkdirAll(filepath.Dir(fn), 0755); err != nil {
return fmt.Errorf("creating directory %q: %w", filepath.Dir(fn), err)
}
logger.Debugf("Writing blob file %s", fn)
out, err := s.fs.Create(fn)
if err != nil {
return fmt.Errorf("creating file %q: %w", fn, err)
}
r := bytes.NewReader(data)
if _, err = io.Copy(out, r); err != nil {
return fmt.Errorf("writing file %q: %w", fn, err)
}
return nil
}
func (s *FilesystemStore) Read(ctx context.Context, checksum string) ([]byte, error) {
func (s *FilesystemReader) Read(ctx context.Context, checksum string) ([]byte, error) {
if s.path == "" {
return nil, fmt.Errorf("no path set")
}
@@ -93,6 +62,61 @@ func (s *FilesystemStore) Read(ctx context.Context, checksum string) ([]byte, er
return io.ReadAll(f)
}
type FilesystemStore struct {
FilesystemReader
deleter *file.Deleter
}
func NewFilesystemStore(path string, fs FS) *FilesystemStore {
deleter := &file.Deleter{
RenamerRemover: fs,
}
return &FilesystemStore{
FilesystemReader: *NewReadonlyFilesystemStore(path, fs),
deleter: deleter,
}
}
func NewReadonlyFilesystemStore(path string, fs FSReader) *FilesystemReader {
return &FilesystemReader{
path: path,
fs: fs,
}
}
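
With the reader split out, a read-only blob source needs only an FSReader, while the full store keeps its writer requirements. A minimal sketch of both constructors, using illustrative paths and assuming the file and blob import paths follow the repository layout:

```go
package main

import (
	"context"
	"fmt"

	"github.com/stashapp/stash/pkg/file"        // assumed import path
	"github.com/stashapp/stash/pkg/sqlite/blob" // assumed import path
)

func main() {
	osfs := &file.OsFS{}
	ctx := context.Background()

	// Full store: Read, Write and Delete under its own path.
	primary := blob.NewFilesystemStore("/data/stash/blobs", osfs)

	// Read-only view of another blob directory: Read only, never modified.
	archive := blob.NewReadonlyFilesystemStore("/mnt/archive/blobs", osfs)

	if data, err := archive.Read(ctx, "0123456789abcdef"); err == nil { // illustrative checksum
		fmt.Println("found archived blob:", len(data), "bytes")
	}
	_ = primary
}
```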
func (s *FilesystemStore) Write(ctx context.Context, checksum string, data []byte) error {
fs, ok := s.fs.(FS)
if !ok {
return fmt.Errorf("internal error: fs is not an FS")
}
if s.path == "" {
return fmt.Errorf("no path set")
}
fn := s.checksumToPath(checksum)
// create the directory if it doesn't exist
if err := fs.MkdirAll(filepath.Dir(fn), 0755); err != nil {
return fmt.Errorf("creating directory %q: %w", filepath.Dir(fn), err)
}
logger.Debugf("Writing blob file %s", fn)
out, err := fs.Create(fn)
if err != nil {
return fmt.Errorf("creating file %q: %w", fn, err)
}
r := bytes.NewReader(data)
if _, err = io.Copy(out, r); err != nil {
return fmt.Errorf("writing file %q: %w", fn, err)
}
return nil
}
func (s *FilesystemStore) Delete(ctx context.Context, checksum string) error {
if s.path == "" {
return fmt.Errorf("no path set")

View File

@@ -147,6 +147,7 @@ These options are typically not exposed in the UI and must be changed manually i
|-------|---------|
| `custom_served_folders` | A map of URLs to file system folders. See below. |
| `custom_ui_location` | The file system folder where the UI files will be served from, instead of using the embedded UI. Empty to disable. Stash must be restarted to take effect. |
| `developer_options.extra_blob_paths` | A list of alternative blob paths. These paths will be read for blob files. Blobs will not be written or deleted from these paths. Intended for developer use only. |
| `max_upload_size` | Maximum file upload size for import files. Defaults to 1GB. |
| `theme_color` | Sets the `theme-color` property in the UI. |
| `gallery_cover_regex` | The regex responsible for selecting images as gallery covers |
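
The dotted key in the table corresponds to a nested structure in the configuration file. Purely as an illustration (the actual config file is YAML, and the paths below are made up), the equivalent shape expressed as a Go literal is:

```go
package main

func main() {
	// Illustrative nesting implied by developer_options.extra_blob_paths.
	cfg := map[string]interface{}{
		"developer_options": map[string]interface{}{
			"extra_blob_paths": []string{
				"/mnt/nas/stash-blobs", // hypothetical read-only blob source
				"/backup/blobs",
			},
		},
	}
	_ = cfg
}
```

Blobs found under these paths are served as if they lived in the primary blobs path, but write and delete operations never touch them.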