// stash/pkg/manager/manager_tasks.go
package manager
import (
"context"
"errors"
"fmt"
"strconv"
"sync"
"time"
"github.com/remeh/sizedwaitgroup"
"github.com/stashapp/stash/pkg/job"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/manager/config"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/utils"
)
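// isGallery, isVideo and isImage report whether pathname has one of the
// extensions configured for galleries, videos and images respectively.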
func isGallery(pathname string) bool {
gExt := config.GetInstance().GetGalleryExtensions()
return matchExtension(pathname, gExt)
}
func isVideo(pathname string) bool {
vidExt := config.GetInstance().GetVideoExtensions()
return matchExtension(pathname, vidExt)
}
func isImage(pathname string) bool {
imgExt := config.GetInstance().GetImageExtensions()
return matchExtension(pathname, imgExt)
}
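// getScanPaths maps the requested scan paths onto the configured stash
// paths. With no input it returns every configured stash; otherwise each
// input path must lie within a configured stash, whose settings are copied
// with the path replaced.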
func getScanPaths(inputPaths []string) []*models.StashConfig {
if len(inputPaths) == 0 {
return config.GetInstance().GetStashPaths()
}
var ret []*models.StashConfig
for _, p := range inputPaths {
s := getStashFromDirPath(p)
if s == nil {
logger.Warnf("%s is not in the configured stash paths", p)
continue
}
// make a copy, changing the path
ss := *s
ss.Path = p
ret = append(ret, &ss)
}
return ret
}
// ScanSubscribe subscribes to a notification that is triggered when a
// scan or clean is complete.
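// Callers typically range over the returned channel to refresh state after
// each scan or clean; the subscription is assumed to be tied to the
// lifetime of ctx.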
func (s *singleton) ScanSubscribe(ctx context.Context) <-chan bool {
return s.scanSubs.subscribe(ctx)
}
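// Scan verifies that FFMPEG is available, then enqueues a scan job for the
// given input and returns the job ID.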
func (s *singleton) Scan(ctx context.Context, input models.ScanMetadataInput) (int, error) {
if err := s.validateFFMPEG(); err != nil {
return 0, err
}
scanJob := ScanJob{
txnManager: s.TxnManager,
input: input,
subscriptions: s.scanSubs,
}
return s.JobManager.Add(ctx, "Scanning...", &scanJob), nil
}
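// Import resets the database and imports all metadata from the configured
// metadata path, failing on duplicates and missing references.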
func (s *singleton) Import(ctx context.Context) (int, error) {
config := config.GetInstance()
metadataPath := config.GetMetadataPath()
if metadataPath == "" {
return 0, errors.New("metadata path must be set in config")
}
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) {
task := ImportTask{
txnManager: s.TxnManager,
BaseDir: metadataPath,
Reset: true,
DuplicateBehaviour: models.ImportDuplicateEnumFail,
MissingRefBehaviour: models.ImportMissingRefEnumFail,
fileNamingAlgorithm: config.GetVideoFileNamingAlgorithm(),
}
task.Start(ctx)
})
return s.JobManager.Add(ctx, "Importing...", j), nil
}
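// Export enqueues a job performing a full export. The metadata path must be
// set in the configuration.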
func (s *singleton) Export(ctx context.Context) (int, error) {
config := config.GetInstance()
metadataPath := config.GetMetadataPath()
if metadataPath == "" {
return 0, errors.New("metadata path must be set in config")
}
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) {
var wg sync.WaitGroup
wg.Add(1)
task := ExportTask{
txnManager: s.TxnManager,
full: true,
fileNamingAlgorithm: config.GetVideoFileNamingAlgorithm(),
}
task.Start(&wg)
})
return s.JobManager.Add(ctx, "Exporting...", j), nil
}
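// RunSingleTask wraps t in a job, enqueues it and returns the job ID.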
func (s *singleton) RunSingleTask(ctx context.Context, t Task) int {
var wg sync.WaitGroup
wg.Add(1)
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) {
t.Start(ctx)
wg.Done()
})
return s.JobManager.Add(ctx, t.GetDescription(), j)
}
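// setGeneratePreviewOptionsInput fills any unset preview options with
// defaults from the current configuration.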
func setGeneratePreviewOptionsInput(optionsInput *models.GeneratePreviewOptionsInput) {
config := config.GetInstance()
if optionsInput.PreviewSegments == nil {
val := config.GetPreviewSegments()
optionsInput.PreviewSegments = &val
}
if optionsInput.PreviewSegmentDuration == nil {
val := config.GetPreviewSegmentDuration()
optionsInput.PreviewSegmentDuration = &val
}
if optionsInput.PreviewExcludeStart == nil {
val := config.GetPreviewExcludeStart()
optionsInput.PreviewExcludeStart = &val
}
if optionsInput.PreviewExcludeEnd == nil {
val := config.GetPreviewExcludeEnd()
optionsInput.PreviewExcludeEnd = &val
}
if optionsInput.PreviewPreset == nil {
val := config.GetPreviewPreset()
optionsInput.PreviewPreset = &val
}
}
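// Generate enqueues a job generating the requested artifacts (sprites,
// previews, markers, transcodes and phashes) for the selected scenes and
// markers, bounded by the configured number of parallel tasks.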
func (s *singleton) Generate(ctx context.Context, input models.GenerateMetadataInput) (int, error) {
if err := s.validateFFMPEG(); err != nil {
return 0, err
}
if err := instance.Paths.Generated.EnsureTmpDir(); err != nil {
logger.Warnf("could not generate temporary directory: %v", err)
}
sceneIDs, err := utils.StringSliceToIntSlice(input.SceneIDs)
if err != nil {
logger.Error(err.Error())
}
markerIDs, err := utils.StringSliceToIntSlice(input.MarkerIDs)
if err != nil {
logger.Error(err.Error())
}
// TODO - formalise this
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) {
var scenes []*models.Scene
var err error
var markers []*models.SceneMarker
if err := s.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
qb := r.Scene()
if len(sceneIDs) > 0 {
scenes, err = qb.FindMany(sceneIDs)
} else {
scenes, err = qb.All()
}
if err != nil {
return err
}
if len(markerIDs) > 0 {
markers, err = r.SceneMarker().FindMany(markerIDs)
if err != nil {
return err
}
}
return nil
}); err != nil {
logger.Error(err.Error())
return
}
config := config.GetInstance()
parallelTasks := config.GetParallelTasksWithAutoDetection()
logger.Infof("Generate started with %d parallel tasks", parallelTasks)
wg := sizedwaitgroup.New(parallelTasks)
lenScenes := len(scenes)
total := lenScenes + len(markers)
progress.SetTotal(total)
if job.IsCancelled(ctx) {
logger.Info("Stopping due to user request")
return
}
// TODO - consider removing this. We wait at most 90 seconds here, and it is
// all for a simple log message - probably not worth waiting for.
var totalsNeeded *totalsGenerate
progress.ExecuteTask("Calculating content to generate...", func() {
totalsNeeded = s.neededGenerate(scenes, input)
if totalsNeeded == nil {
logger.Infof("Taking too long to count content. Skipping...")
logger.Infof("Generating content")
} else {
logger.Infof("Generating %d sprites %d previews %d image previews %d markers %d transcodes %d phashes", totalsNeeded.sprites, totalsNeeded.previews, totalsNeeded.imagePreviews, totalsNeeded.markers, totalsNeeded.transcodes, totalsNeeded.phashes)
}
})
fileNamingAlgo := config.GetVideoFileNamingAlgorithm()
overwrite := false
if input.Overwrite != nil {
overwrite = *input.Overwrite
}
generatePreviewOptions := input.PreviewOptions
if generatePreviewOptions == nil {
generatePreviewOptions = &models.GeneratePreviewOptionsInput{}
}
setGeneratePreviewOptionsInput(generatePreviewOptions)
// Start measuring how long the generate takes (consider moving this up).
start := time.Now()
if err = instance.Paths.Generated.EnsureTmpDir(); err != nil {
logger.Warnf("could not create temporary directory: %v", err)
}
for _, scene := range scenes {
progress.Increment()
if job.IsCancelled(ctx) {
logger.Info("Stopping due to user request")
wg.Wait()
if err := instance.Paths.Generated.EmptyTmpDir(); err != nil {
logger.Warnf("failure emptying temporary directory: %v", err)
}
return
}
if scene == nil {
logger.Errorf("nil scene, skipping generate")
continue
}
if utils.IsTrue(input.Sprites) {
task := GenerateSpriteTask{
Scene: *scene,
Overwrite: overwrite,
fileNamingAlgorithm: fileNamingAlgo,
}
wg.Add()
go progress.ExecuteTask(fmt.Sprintf("Generating sprites for %s", scene.Path), func() {
task.Start()
wg.Done()
})
}
if utils.IsTrue(input.Previews) {
task := GeneratePreviewTask{
Scene: *scene,
ImagePreview: utils.IsTrue(input.ImagePreviews),
Options: *generatePreviewOptions,
Overwrite: overwrite,
fileNamingAlgorithm: fileNamingAlgo,
}
wg.Add()
go progress.ExecuteTask(fmt.Sprintf("Generating preview for %s", scene.Path), func() {
task.Start()
wg.Done()
})
}
if utils.IsTrue(input.Markers) {
wg.Add()
task := GenerateMarkersTask{
TxnManager: s.TxnManager,
Scene: scene,
Overwrite: overwrite,
fileNamingAlgorithm: fileNamingAlgo,
ImagePreview: utils.IsTrue(input.MarkerImagePreviews),
Screenshot: utils.IsTrue(input.MarkerScreenshots),
}
go progress.ExecuteTask(fmt.Sprintf("Generating markers for %s", scene.Path), func() {
task.Start()
wg.Done()
})
}
if utils.IsTrue(input.Transcodes) {
wg.Add()
task := GenerateTranscodeTask{
Scene: *scene,
Overwrite: overwrite,
fileNamingAlgorithm: fileNamingAlgo,
}
go progress.ExecuteTask(fmt.Sprintf("Generating transcode for %s", scene.Path), func() {
task.Start()
wg.Done()
})
}
if utils.IsTrue(input.Phashes) {
task := GeneratePhashTask{
Scene: *scene,
fileNamingAlgorithm: fileNamingAlgo,
txnManager: s.TxnManager,
Overwrite: overwrite,
}
wg.Add()
go progress.ExecuteTask(fmt.Sprintf("Generating phash for %s", scene.Path), func() {
task.Start()
wg.Done()
})
}
}
wg.Wait()
for _, marker := range markers {
progress.Increment()
if job.IsCancelled(ctx) {
logger.Info("Stopping due to user request")
wg.Wait()
if err := instance.Paths.Generated.EmptyTmpDir(); err != nil {
logger.Warnf("failure emptying temporary directory: %v", err)
}
elapsed := time.Since(start)
logger.Info(fmt.Sprintf("Generate finished (%s)", elapsed))
return
}
if marker == nil {
logger.Errorf("nil marker, skipping generate")
continue
}
wg.Add()
task := GenerateMarkersTask{
TxnManager: s.TxnManager,
Marker: marker,
Overwrite: overwrite,
fileNamingAlgorithm: fileNamingAlgo,
}
go progress.ExecuteTask(fmt.Sprintf("Generating marker preview for marker ID %d", marker.ID), func() {
task.Start()
wg.Done()
})
}
wg.Wait()
if err = instance.Paths.Generated.EmptyTmpDir(); err != nil {
logger.Warnf("failure emptying temporary directory: %v", err)
}
elapsed := time.Since(start)
logger.Info(fmt.Sprintf("Generate finished (%s)", elapsed))
})
return s.JobManager.Add(ctx, "Generating...", j), nil
}
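// GenerateDefaultScreenshot regenerates the screenshot for the given scene
// at the default position.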
func (s *singleton) GenerateDefaultScreenshot(ctx context.Context, sceneId string) int {
return s.generateScreenshot(ctx, sceneId, nil)
}
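// GenerateScreenshot regenerates the screenshot for the given scene at the
// given offset.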
func (s *singleton) GenerateScreenshot(ctx context.Context, sceneId string, at float64) int {
return s.generateScreenshot(ctx, sceneId, &at)
}
// generate default screenshot if at is nil
func (s *singleton) generateScreenshot(ctx context.Context, sceneId string, at *float64) int {
if err := instance.Paths.Generated.EnsureTmpDir(); err != nil {
logger.Warnf("failure generating screenshot: %v", err)
}
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) {
sceneIdInt, err := strconv.Atoi(sceneId)
if err != nil {
logger.Errorf("Error parsing scene id %s: %s", sceneId, err.Error())
return
}
var scene *models.Scene
if err := s.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
var err error
scene, err = r.Scene().Find(sceneIdInt)
return err
}); err != nil || scene == nil {
logger.Errorf("failed to get scene for generate: %s", err.Error())
return
}
task := GenerateScreenshotTask{
txnManager: s.TxnManager,
Scene: *scene,
ScreenshotAt: at,
fileNamingAlgorithm: config.GetInstance().GetVideoFileNamingAlgorithm(),
}
task.Start()
logger.Infof("Generate screenshot finished")
})
return s.JobManager.Add(ctx, fmt.Sprintf("Generating screenshot for scene id %s", sceneId), j)
}
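// AutoTag enqueues an auto-tagging job for the given input and returns the
// job ID.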
func (s *singleton) AutoTag(ctx context.Context, input models.AutoTagMetadataInput) int {
j := autoTagJob{
txnManager: s.TxnManager,
input: input,
}
return s.JobManager.Add(ctx, "Auto-tagging...", &j)
}
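// Clean enqueues a clean job. It shares the scan subscriptions, so
// ScanSubscribe listeners are also notified when a clean completes.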
func (s *singleton) Clean(ctx context.Context, input models.CleanMetadataInput) int {
j := cleanJob{
txnManager: s.TxnManager,
input: input,
scanSubs: s.scanSubs,
}
return s.JobManager.Add(ctx, "Cleaning...", &j)
}
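// MigrateHash enqueues a job that renames the generated files of every
// scene to match the currently configured file naming hash.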
func (s *singleton) MigrateHash(ctx context.Context) int {
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) {
fileNamingAlgo := config.GetInstance().GetVideoFileNamingAlgorithm()
logger.Infof("Migrating generated files for %s naming hash", fileNamingAlgo.String())
var scenes []*models.Scene
if err := s.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
var err error
scenes, err = r.Scene().All()
return err
}); err != nil {
logger.Errorf("failed to fetch list of scenes for migration: %s", err.Error())
return
}
var wg sync.WaitGroup
total := len(scenes)
progress.SetTotal(total)
for _, scene := range scenes {
progress.Increment()
if job.IsCancelled(ctx) {
logger.Info("Stopping due to user request")
return
}
if scene == nil {
logger.Errorf("nil scene, skipping migrate")
continue
}
wg.Add(1)
task := MigrateHashTask{Scene: scene, fileNamingAlgorithm: fileNamingAlgo}
go func() {
task.Start()
wg.Done()
}()
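// wait for each migration to finish before starting the next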
wg.Wait()
}
logger.Info("Finished migrating")
})
return s.JobManager.Add(ctx, "Migrating scene hashes...", j)
}
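// totalsGenerate counts how many artifacts of each type a generate run
// would produce.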
type totalsGenerate struct {
sprites int64
previews int64
imagePreviews int64
markers int64
transcodes int64
phashes int64
}
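// neededGenerate tallies the content that generating would produce for the
// given scenes. It returns nil if counting takes longer than 90 seconds.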
func (s *singleton) neededGenerate(scenes []*models.Scene, input models.GenerateMetadataInput) *totalsGenerate {
var totals totalsGenerate
const timeout = 90 * time.Second
// create a control channel through which to signal the counting loop when the timeout is reached
chTimeout := make(chan struct{})
// run the timeout function in a separate goroutine
go func() {
time.Sleep(timeout)
chTimeout <- struct{}{}
}()
fileNamingAlgo := config.GetInstance().GetVideoFileNamingAlgorithm()
overwrite := false
if input.Overwrite != nil {
overwrite = *input.Overwrite
}
logger.Infof("Counting content to generate...")
for _, scene := range scenes {
if scene != nil {
if utils.IsTrue(input.Sprites) {
task := GenerateSpriteTask{
Scene: *scene,
fileNamingAlgorithm: fileNamingAlgo,
}
if overwrite || task.required() {
totals.sprites++
}
}
if utils.IsTrue(input.Previews) {
task := GeneratePreviewTask{
Scene: *scene,
ImagePreview: utils.IsTrue(input.ImagePreviews),
fileNamingAlgorithm: fileNamingAlgo,
}
sceneHash := scene.GetHash(task.fileNamingAlgorithm)
if overwrite || !task.doesVideoPreviewExist(sceneHash) {
totals.previews++
}
if utils.IsTrue(input.ImagePreviews) && (overwrite || !task.doesImagePreviewExist(sceneHash)) {
totals.imagePreviews++
}
}
if utils.IsTrue(input.Markers) {
task := GenerateMarkersTask{
TxnManager: s.TxnManager,
Scene: scene,
Overwrite: overwrite,
fileNamingAlgorithm: fileNamingAlgo,
}
totals.markers += int64(task.isMarkerNeeded())
}
if utils.IsTrue(input.Transcodes) {
task := GenerateTranscodeTask{
Scene: *scene,
Overwrite: overwrite,
fileNamingAlgorithm: fileNamingAlgo,
}
if task.isTranscodeNeeded() {
totals.transcodes++
}
}
if utils.IsTrue(input.Phashes) {
task := GeneratePhashTask{
Scene: *scene,
fileNamingAlgorithm: fileNamingAlgo,
}
if task.shouldGenerate() {
totals.phashes++
}
}
}
// check for timeout
select {
case <-chTimeout:
return nil
default:
}
}
return &totals
}
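// StashBoxBatchPerformerTag enqueues a job that tags performers using the
// selected stash-box endpoint. Performers are selected by ID, by name, or
// by stash ID status, in that order of precedence.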
func (s *singleton) StashBoxBatchPerformerTag(ctx context.Context, input models.StashBoxBatchPerformerTagInput) int {
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) {
logger.Infof("Initiating stash-box batch performer tag")
boxes := config.GetInstance().GetStashBoxes()
if input.Endpoint < 0 || input.Endpoint >= len(boxes) {
logger.Error(fmt.Errorf("invalid stash_box_index %d", input.Endpoint))
return
}
box := boxes[input.Endpoint]
var tasks []StashBoxPerformerTagTask
if len(input.PerformerIds) > 0 {
if err := s.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
performerQuery := r.Performer()
for _, performerID := range input.PerformerIds {
if id, err := strconv.Atoi(performerID); err == nil {
performer, err := performerQuery.Find(id)
if err == nil {
tasks = append(tasks, StashBoxPerformerTagTask{
txnManager: s.TxnManager,
performer: performer,
refresh: input.Refresh,
box: box,
excluded_fields: input.ExcludeFields,
})
} else {
return err
}
}
}
return nil
}); err != nil {
logger.Error(err.Error())
}
} else if len(input.PerformerNames) > 0 {
for i := range input.PerformerNames {
if len(input.PerformerNames[i]) > 0 {
tasks = append(tasks, StashBoxPerformerTagTask{
txnManager: s.TxnManager,
name: &input.PerformerNames[i],
refresh: input.Refresh,
box: box,
excluded_fields: input.ExcludeFields,
})
}
}
} else {
if err := s.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
performerQuery := r.Performer()
var performers []*models.Performer
var err error
if input.Refresh {
performers, err = performerQuery.FindByStashIDStatus(true, box.Endpoint)
} else {
performers, err = performerQuery.FindByStashIDStatus(false, box.Endpoint)
}
if err != nil {
return fmt.Errorf("error querying performers: %v", err)
}
for _, performer := range performers {
tasks = append(tasks, StashBoxPerformerTagTask{
txnManager: s.TxnManager,
performer: performer,
refresh: input.Refresh,
box: box,
excluded_fields: input.ExcludeFields,
})
}
return nil
}); err != nil {
logger.Error(err.Error())
return
}
}
if len(tasks) == 0 {
return
}
progress.SetTotal(len(tasks))
logger.Infof("Starting stash-box batch operation for %d performers", len(tasks))
var wg sync.WaitGroup
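// tasks run synchronously, one at a time; the WaitGroup is never waited on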
for _, task := range tasks {
wg.Add(1)
progress.ExecuteTask(task.Description(), func() {
task.Start()
wg.Done()
})
progress.Increment()
}
})
return s.JobManager.Add(ctx, "Batch stash-box performer tag...", j)
}