2021-01-18 01:23:20 +00:00
package sqlite
2019-02-09 12:30:49 +00:00
import (
2022-05-19 07:49:32 +00:00
"context"
2019-02-09 12:30:49 +00:00
"database/sql"
Errorlint sweep + minor linter tweaks (#1796)
* Replace error assertions with Go 1.13 style
Use `errors.As(..)` over type assertions. This enables better use of
wrapped errors in the future, and lets us pass some errorlint checks
in the process.
The rewrite is entirely mechanical, and uses a standard idiom for
doing so.
* Use Go 1.13's errors.Is(..)
Rather than directly checking for error equality, use errors.Is(..).
This protects against error wrapping issues in the future.
Even though something like sql.ErrNoRows doesn't need the wrapping, do
so anyway, for the sake of consistency throughout the code base.
The change almost lets us pass the `errorlint` Go checker except for
a missing case in `js.go` which is to be handled separately; it isn't
mechanical, like these changes are.
* Remove goconst
goconst isn't a useful linter in many cases, because it's false positive
rate is high. It's 100% for the current code base.
* Avoid direct comparison of errors in recover()
Assert that we are catching an error from recover(). If we are,
check that the error caught matches errStop.
* Enable the "errorlint" checker
Configure the checker to avoid checking for errorf wraps. These are
often false positives since the suggestion is to blanket wrap errors
with %w, and that exposes the underlying API which you might not want
to do.
The other warnings are good however, and with the current patch stack,
the code base passes all these checks as well.
* Configure rowserrcheck
The project uses sqlx. Configure rowserrcheck to include said package.
* Mechanically rewrite a large set of errors
Mechanically search for errors that look like
fmt.Errorf("...%s", err.Error())
and rewrite those into
fmt.Errorf("...%v", err)
The `fmt` package is error-aware and knows how to call err.Error()
itself.
The rationale is that this is more idiomatic Go; it paves the
way for using error wrapping later with %w in some sites.
This patch only addresses the entirely mechanical rewriting caught by
a project-side search/replace. There are more individual sites not
addressed by this patch.
2021-10-12 03:03:08 +00:00
"errors"
2020-07-19 01:59:18 +00:00
"fmt"
2022-07-13 06:30:54 +00:00
"path/filepath"
2021-03-02 00:27:36 +00:00
"strconv"
2021-04-11 23:04:40 +00:00
"strings"
2019-08-15 07:32:57 +00:00
2022-07-13 06:30:54 +00:00
"github.com/doug-martin/goqu/v9"
"github.com/doug-martin/goqu/v9/exp"
2019-08-15 07:32:57 +00:00
"github.com/jmoiron/sqlx"
2022-07-13 06:30:54 +00:00
"gopkg.in/guregu/null.v4"
"gopkg.in/guregu/null.v4/zero"
"github.com/stashapp/stash/pkg/file"
2021-01-18 01:23:20 +00:00
"github.com/stashapp/stash/pkg/models"
2022-07-13 06:30:54 +00:00
"github.com/stashapp/stash/pkg/sliceutil/intslice"
2021-04-11 23:04:40 +00:00
"github.com/stashapp/stash/pkg/utils"
2019-02-09 12:30:49 +00:00
)
2022-07-13 06:30:54 +00:00
// Table and join-table names used by the scene store.
const (
	sceneTable            = "scenes"
	scenesFilesTable      = "scenes_files"
	sceneIDColumn         = "scene_id"
	performersScenesTable = "performers_scenes"
	scenesTagsTable       = "scenes_tags"
	scenesGalleriesTable  = "scenes_galleries"
	moviesScenesTable     = "movies_scenes"
)
2019-02-09 12:30:49 +00:00
2022-07-13 06:30:54 +00:00
// findExactDuplicateQuery returns comma-separated groups of scene ids that
// share the same phash fingerprint (exact duplicates), largest total file
// size first. Note: the 'phash' literal must contain no whitespace so it
// matches the stored fingerprint type exactly.
var findExactDuplicateQuery = `
SELECT GROUP_CONCAT(scenes.id) as ids
FROM scenes
INNER JOIN scenes_files ON (scenes.id = scenes_files.scene_id)
INNER JOIN files ON (scenes_files.file_id = files.id)
INNER JOIN files_fingerprints ON (scenes_files.file_id = files_fingerprints.file_id AND files_fingerprints.type = 'phash')
GROUP BY files_fingerprints.fingerprint
HAVING COUNT(files_fingerprints.fingerprint) > 1 AND COUNT(DISTINCT scenes.id) > 1
ORDER BY SUM(files.size) DESC;
`
2022-07-13 06:30:54 +00:00
// findAllPhashesQuery returns every scene id with its phash fingerprint,
// largest file first. Note: the 'phash' literal must contain no whitespace
// so it matches the stored fingerprint type exactly.
var findAllPhashesQuery = `
SELECT scenes.id as id, files_fingerprints.fingerprint as phash
FROM scenes
INNER JOIN scenes_files ON (scenes.id = scenes_files.scene_id)
INNER JOIN files ON (scenes_files.file_id = files.id)
INNER JOIN files_fingerprints ON (scenes_files.file_id = files_fingerprints.file_id AND files_fingerprints.type = 'phash')
ORDER BY files.size DESC
`
2019-02-09 12:30:49 +00:00
2022-07-13 06:30:54 +00:00
type sceneRow struct {
2022-09-19 04:53:46 +00:00
ID int ` db:"id" goqu:"skipinsert" `
Title zero . String ` db:"title" `
Details zero . String ` db:"details" `
URL zero . String ` db:"url" `
Date models . SQLiteDate ` db:"date" `
Rating null . Int ` db:"rating" `
Organized bool ` db:"organized" `
OCounter int ` db:"o_counter" `
StudioID null . Int ` db:"studio_id,omitempty" `
CreatedAt models . SQLiteTimestamp ` db:"created_at" `
UpdatedAt models . SQLiteTimestamp ` db:"updated_at" `
2022-07-13 06:30:54 +00:00
}
func ( r * sceneRow ) fromScene ( o models . Scene ) {
r . ID = o . ID
r . Title = zero . StringFrom ( o . Title )
r . Details = zero . StringFrom ( o . Details )
r . URL = zero . StringFrom ( o . URL )
if o . Date != nil {
_ = r . Date . Scan ( o . Date . Time )
}
r . Rating = intFromPtr ( o . Rating )
r . Organized = o . Organized
r . OCounter = o . OCounter
r . StudioID = intFromPtr ( o . StudioID )
2022-09-19 04:53:46 +00:00
r . CreatedAt = models . SQLiteTimestamp { Timestamp : o . CreatedAt }
r . UpdatedAt = models . SQLiteTimestamp { Timestamp : o . UpdatedAt }
2022-07-13 06:30:54 +00:00
}
2022-09-01 07:54:34 +00:00
// sceneQueryRow extends sceneRow with the primary-file columns produced by
// selectDataset's left joins.
type sceneQueryRow struct {
	sceneRow
	PrimaryFileID         null.Int    `db:"primary_file_id"`
	PrimaryFileFolderPath zero.String `db:"primary_file_folder_path"`
	PrimaryFileBasename   zero.String `db:"primary_file_basename"`
	PrimaryFileOshash     zero.String `db:"primary_file_oshash"`
	PrimaryFileChecksum   zero.String `db:"primary_file_checksum"`
}
func ( r * sceneQueryRow ) resolve ( ) * models . Scene {
ret := & models . Scene {
2022-07-13 06:30:54 +00:00
ID : r . ID ,
Title : r . Title . String ,
Details : r . Details . String ,
URL : r . URL . String ,
Date : r . Date . DatePtr ( ) ,
Rating : nullIntPtr ( r . Rating ) ,
Organized : r . Organized ,
OCounter : r . OCounter ,
StudioID : nullIntPtr ( r . StudioID ) ,
2022-09-01 07:54:34 +00:00
PrimaryFileID : nullIntFileIDPtr ( r . PrimaryFileID ) ,
OSHash : r . PrimaryFileOshash . String ,
Checksum : r . PrimaryFileChecksum . String ,
2022-09-19 04:53:46 +00:00
CreatedAt : r . CreatedAt . Timestamp ,
UpdatedAt : r . UpdatedAt . Timestamp ,
2022-07-13 06:30:54 +00:00
}
2022-09-01 07:54:34 +00:00
if r . PrimaryFileFolderPath . Valid && r . PrimaryFileBasename . Valid {
ret . Path = filepath . Join ( r . PrimaryFileFolderPath . String , r . PrimaryFileBasename . String )
}
return ret
2019-02-09 12:30:49 +00:00
}
2022-08-11 06:14:57 +00:00
type sceneRowRecord struct {
updateRecord
2020-09-20 08:36:02 +00:00
}
2022-08-11 06:14:57 +00:00
func ( r * sceneRowRecord ) fromPartial ( o models . ScenePartial ) {
r . setNullString ( "title" , o . Title )
r . setNullString ( "details" , o . Details )
r . setNullString ( "url" , o . URL )
r . setSQLiteDate ( "date" , o . Date )
r . setNullInt ( "rating" , o . Rating )
r . setBool ( "organized" , o . Organized )
r . setInt ( "o_counter" , o . OCounter )
r . setNullInt ( "studio_id" , o . StudioID )
2022-09-20 03:52:37 +00:00
r . setSQLiteTimestamp ( "created_at" , o . CreatedAt )
r . setSQLiteTimestamp ( "updated_at" , o . UpdatedAt )
2020-11-04 23:26:51 +00:00
}
2022-07-13 06:30:54 +00:00
type SceneStore struct {
repository
2022-08-11 06:14:57 +00:00
tableMgr * table
2022-07-13 06:30:54 +00:00
oCounterManager
2022-08-11 06:14:57 +00:00
fileStore * FileStore
2021-01-18 01:23:20 +00:00
}
2022-08-11 06:14:57 +00:00
func NewSceneStore ( fileStore * FileStore ) * SceneStore {
2022-07-13 06:30:54 +00:00
return & SceneStore {
2022-05-06 01:59:28 +00:00
repository : repository {
2022-07-13 06:30:54 +00:00
tableName : sceneTable ,
idColumn : idColumn ,
2022-05-06 01:59:28 +00:00
} ,
2022-07-13 06:30:54 +00:00
tableMgr : sceneTableMgr ,
oCounterManager : oCounterManager { sceneTableMgr } ,
2022-08-11 06:14:57 +00:00
fileStore : fileStore ,
2022-05-06 01:59:28 +00:00
}
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) table ( ) exp . IdentifierExpression {
return qb . tableMgr . table
2022-05-06 01:59:28 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) Create ( ctx context . Context , newObject * models . Scene , fileIDs [ ] file . ID ) error {
var r sceneRow
r . fromScene ( * newObject )
id , err := qb . tableMgr . insertID ( ctx , r )
2020-02-03 00:17:28 +00:00
if err != nil {
2022-07-13 06:30:54 +00:00
return err
}
if len ( fileIDs ) > 0 {
const firstPrimary = true
if err := scenesFilesTableMgr . insertJoins ( ctx , id , firstPrimary , fileIDs ) ; err != nil {
return err
}
2020-02-03 00:17:28 +00:00
}
2022-08-12 02:21:46 +00:00
if newObject . PerformerIDs . Loaded ( ) {
if err := scenesPerformersTableMgr . insertJoins ( ctx , id , newObject . PerformerIDs . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-08-12 02:21:46 +00:00
if newObject . TagIDs . Loaded ( ) {
if err := scenesTagsTableMgr . insertJoins ( ctx , id , newObject . TagIDs . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-08-12 02:21:46 +00:00
if newObject . GalleryIDs . Loaded ( ) {
if err := scenesGalleriesTableMgr . insertJoins ( ctx , id , newObject . GalleryIDs . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-08-12 02:21:46 +00:00
if newObject . StashIDs . Loaded ( ) {
if err := scenesStashIDsTableMgr . insertJoins ( ctx , id , newObject . StashIDs . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-08-12 02:21:46 +00:00
if newObject . Movies . Loaded ( ) {
if err := scenesMoviesTableMgr . insertJoins ( ctx , id , newObject . Movies . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
updated , err := qb . find ( ctx , id )
2020-02-03 00:17:28 +00:00
if err != nil {
2022-07-13 06:30:54 +00:00
return fmt . Errorf ( "finding after create: %w" , err )
2020-02-03 00:17:28 +00:00
}
2022-07-13 06:30:54 +00:00
* newObject = * updated
return nil
2020-02-03 00:17:28 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) UpdatePartial ( ctx context . Context , id int , partial models . ScenePartial ) ( * models . Scene , error ) {
r := sceneRowRecord {
updateRecord {
Record : make ( exp . Record ) ,
} ,
2020-02-03 00:17:28 +00:00
}
2022-07-13 06:30:54 +00:00
r . fromPartial ( partial )
if len ( r . Record ) > 0 {
if err := qb . tableMgr . updateByID ( ctx , id , r . Record ) ; err != nil {
return nil , err
}
}
if partial . PerformerIDs != nil {
if err := scenesPerformersTableMgr . modifyJoins ( ctx , id , partial . PerformerIDs . IDs , partial . PerformerIDs . Mode ) ; err != nil {
return nil , err
}
}
if partial . TagIDs != nil {
if err := scenesTagsTableMgr . modifyJoins ( ctx , id , partial . TagIDs . IDs , partial . TagIDs . Mode ) ; err != nil {
return nil , err
}
}
if partial . GalleryIDs != nil {
if err := scenesGalleriesTableMgr . modifyJoins ( ctx , id , partial . GalleryIDs . IDs , partial . GalleryIDs . Mode ) ; err != nil {
return nil , err
}
}
if partial . StashIDs != nil {
if err := scenesStashIDsTableMgr . modifyJoins ( ctx , id , partial . StashIDs . StashIDs , partial . StashIDs . Mode ) ; err != nil {
return nil , err
}
}
if partial . MovieIDs != nil {
if err := scenesMoviesTableMgr . modifyJoins ( ctx , id , partial . MovieIDs . Movies , partial . MovieIDs . Mode ) ; err != nil {
return nil , err
}
2020-02-03 00:17:28 +00:00
}
2022-09-05 01:46:18 +00:00
if partial . PrimaryFileID != nil {
if err := scenesFilesTableMgr . setPrimary ( ctx , id , * partial . PrimaryFileID ) ; err != nil {
return nil , err
}
}
2020-02-03 00:17:28 +00:00
2022-07-13 06:30:54 +00:00
return qb . Find ( ctx , id )
2020-02-03 00:17:28 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) Update ( ctx context . Context , updatedObject * models . Scene ) error {
var r sceneRow
r . fromScene ( * updatedObject )
if err := qb . tableMgr . updateByID ( ctx , updatedObject . ID , r ) ; err != nil {
return err
2020-02-03 00:17:28 +00:00
}
2022-08-12 02:21:46 +00:00
if updatedObject . PerformerIDs . Loaded ( ) {
if err := scenesPerformersTableMgr . replaceJoins ( ctx , updatedObject . ID , updatedObject . PerformerIDs . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-08-12 02:21:46 +00:00
if updatedObject . TagIDs . Loaded ( ) {
if err := scenesTagsTableMgr . replaceJoins ( ctx , updatedObject . ID , updatedObject . TagIDs . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-08-12 02:21:46 +00:00
if updatedObject . GalleryIDs . Loaded ( ) {
if err := scenesGalleriesTableMgr . replaceJoins ( ctx , updatedObject . ID , updatedObject . GalleryIDs . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-08-12 02:21:46 +00:00
if updatedObject . StashIDs . Loaded ( ) {
if err := scenesStashIDsTableMgr . replaceJoins ( ctx , updatedObject . ID , updatedObject . StashIDs . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-08-12 02:21:46 +00:00
if updatedObject . Movies . Loaded ( ) {
if err := scenesMoviesTableMgr . replaceJoins ( ctx , updatedObject . ID , updatedObject . Movies . List ( ) ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
2022-09-01 07:54:34 +00:00
if updatedObject . Files . Loaded ( ) {
fileIDs := make ( [ ] file . ID , len ( updatedObject . Files . List ( ) ) )
for i , f := range updatedObject . Files . List ( ) {
fileIDs [ i ] = f . ID
}
2020-02-03 00:17:28 +00:00
2022-09-01 07:54:34 +00:00
if err := scenesFilesTableMgr . replaceJoins ( ctx , updatedObject . ID , fileIDs ) ; err != nil {
return err
}
2022-07-13 06:30:54 +00:00
}
return nil
2020-02-03 00:17:28 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) Destroy ( ctx context . Context , id int ) error {
2021-01-18 01:23:20 +00:00
// delete all related table rows
// TODO - this should be handled by a delete cascade
2022-05-19 07:49:32 +00:00
if err := qb . performersRepository ( ) . destroy ( ctx , [ ] int { id } ) ; err != nil {
2020-04-01 01:07:43 +00:00
return err
}
2021-01-18 01:23:20 +00:00
// scene markers should be handled prior to calling destroy
// galleries should be handled prior to calling destroy
2022-07-13 06:30:54 +00:00
return qb . tableMgr . destroyExisting ( ctx , [ ] int { id } )
2019-08-15 07:32:57 +00:00
}
2021-01-18 01:23:20 +00:00
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) Find ( ctx context . Context , id int ) ( * models . Scene , error ) {
2022-05-19 07:49:32 +00:00
return qb . find ( ctx , id )
2019-10-14 23:54:05 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) FindMany ( ctx context . Context , ids [ ] int ) ( [ ] * models . Scene , error ) {
2022-08-11 06:14:57 +00:00
table := qb . table ( )
q := qb . selectDataset ( ) . Prepared ( true ) . Where ( table . Col ( idColumn ) . In ( ids ) )
unsorted , err := qb . getMany ( ctx , q )
if err != nil {
return nil , err
}
2020-07-19 01:59:18 +00:00
2022-08-11 06:14:57 +00:00
scenes := make ( [ ] * models . Scene , len ( ids ) )
2020-07-19 01:59:18 +00:00
2022-08-11 06:14:57 +00:00
for _ , s := range unsorted {
i := intslice . IntIndex ( ids , s . ID )
scenes [ i ] = s
}
for i := range scenes {
if scenes [ i ] == nil {
return nil , fmt . Errorf ( "scene with id %d not found" , ids [ i ] )
}
2020-07-19 01:59:18 +00:00
}
return scenes , nil
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) selectDataset ( ) * goqu . SelectDataset {
2022-08-11 06:14:57 +00:00
table := qb . table ( )
2022-09-01 07:54:34 +00:00
files := fileTableMgr . table
folders := folderTableMgr . table
checksum := fingerprintTableMgr . table . As ( "fingerprint_md5" )
oshash := fingerprintTableMgr . table . As ( "fingerprint_oshash" )
return dialect . From ( table ) . LeftJoin (
scenesFilesJoinTable ,
goqu . On (
scenesFilesJoinTable . Col ( sceneIDColumn ) . Eq ( table . Col ( idColumn ) ) ,
scenesFilesJoinTable . Col ( "primary" ) . Eq ( 1 ) ,
) ,
) . LeftJoin (
files ,
goqu . On ( files . Col ( idColumn ) . Eq ( scenesFilesJoinTable . Col ( fileIDColumn ) ) ) ,
) . LeftJoin (
folders ,
goqu . On ( folders . Col ( idColumn ) . Eq ( files . Col ( "parent_folder_id" ) ) ) ,
) . LeftJoin (
checksum ,
goqu . On (
checksum . Col ( fileIDColumn ) . Eq ( scenesFilesJoinTable . Col ( fileIDColumn ) ) ,
checksum . Col ( "type" ) . Eq ( file . FingerprintTypeMD5 ) ,
) ,
) . LeftJoin (
oshash ,
goqu . On (
oshash . Col ( fileIDColumn ) . Eq ( scenesFilesJoinTable . Col ( fileIDColumn ) ) ,
oshash . Col ( "type" ) . Eq ( file . FingerprintTypeOshash ) ,
) ,
) . Select (
qb . table ( ) . All ( ) ,
scenesFilesJoinTable . Col ( fileIDColumn ) . As ( "primary_file_id" ) ,
folders . Col ( "path" ) . As ( "primary_file_folder_path" ) ,
files . Col ( "basename" ) . As ( "primary_file_basename" ) ,
checksum . Col ( "fingerprint" ) . As ( "primary_file_checksum" ) ,
oshash . Col ( "fingerprint" ) . As ( "primary_file_oshash" ) ,
)
2022-07-13 06:30:54 +00:00
}
// get runs q and returns the first scene, or sql.ErrNoRows if none matched.
func (qb *SceneStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Scene, error) {
	rows, err := qb.getMany(ctx, q)
	if err != nil {
		return nil, err
	}

	if len(rows) == 0 {
		return nil, sql.ErrNoRows
	}

	return rows[0], nil
}
func ( qb * SceneStore ) getMany ( ctx context . Context , q * goqu . SelectDataset ) ( [ ] * models . Scene , error ) {
const single = false
2022-08-11 06:14:57 +00:00
var ret [ ] * models . Scene
2022-09-02 01:18:37 +00:00
var lastID int
2022-07-13 06:30:54 +00:00
if err := queryFunc ( ctx , q , single , func ( r * sqlx . Rows ) error {
2022-09-01 07:54:34 +00:00
var f sceneQueryRow
2022-07-13 06:30:54 +00:00
if err := r . StructScan ( & f ) ; err != nil {
return err
2021-01-18 01:23:20 +00:00
}
2022-07-13 06:30:54 +00:00
2022-08-11 06:14:57 +00:00
s := f . resolve ( )
2022-09-02 01:18:37 +00:00
if s . ID == lastID {
return fmt . Errorf ( "internal error: multiple rows returned for single scene id %d" , s . ID )
}
lastID = s . ID
2022-08-11 06:14:57 +00:00
ret = append ( ret , s )
return nil
} ) ; err != nil {
return nil , err
}
return ret , nil
}
2022-09-01 07:54:34 +00:00
func ( qb * SceneStore ) GetFiles ( ctx context . Context , id int ) ( [ ] * file . VideoFile , error ) {
2022-08-11 06:14:57 +00:00
fileIDs , err := qb . filesRepository ( ) . get ( ctx , id )
if err != nil {
return nil , err
}
// use fileStore to load files
files , err := qb . fileStore . Find ( ctx , fileIDs ... )
if err != nil {
return nil , err
}
ret := make ( [ ] * file . VideoFile , len ( files ) )
for i , f := range files {
var ok bool
ret [ i ] , ok = f . ( * file . VideoFile )
if ! ok {
return nil , fmt . Errorf ( "expected file to be *file.VideoFile not %T" , f )
}
}
return ret , nil
}
2022-09-01 07:54:34 +00:00
// GetManyFileIDs returns, for each scene id, the ids of all of its files
// (not just the primary one).
func (qb *SceneStore) GetManyFileIDs(ctx context.Context, ids []int) ([][]file.ID, error) {
	const primaryOnly = false
	return qb.filesRepository().getMany(ctx, ids, primaryOnly)
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) find ( ctx context . Context , id int ) ( * models . Scene , error ) {
2022-08-11 06:14:57 +00:00
q := qb . selectDataset ( ) . Where ( qb . tableMgr . byID ( id ) )
2022-07-13 06:30:54 +00:00
ret , err := qb . get ( ctx , q )
if err != nil {
return nil , fmt . Errorf ( "getting scene by id %d: %w" , id , err )
}
return ret , nil
2019-02-09 12:30:49 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) FindByFileID ( ctx context . Context , fileID file . ID ) ( [ ] * models . Scene , error ) {
2022-08-01 01:01:29 +00:00
sq := dialect . From ( scenesFilesJoinTable ) . Select ( scenesFilesJoinTable . Col ( sceneIDColumn ) ) . Where (
scenesFilesJoinTable . Col ( fileIDColumn ) . Eq ( fileID ) ,
2022-07-13 06:30:54 +00:00
)
ret , err := qb . findBySubquery ( ctx , sq )
if err != nil {
return nil , fmt . Errorf ( "getting scenes by file id %d: %w" , fileID , err )
}
return ret , nil
2020-08-06 01:21:14 +00:00
}
2022-10-11 03:22:23 +00:00
// FindByPrimaryFileID returns the scenes whose primary file has the given id.
func (qb *SceneStore) FindByPrimaryFileID(ctx context.Context, fileID file.ID) ([]*models.Scene, error) {
	sq := dialect.From(scenesFilesJoinTable).
		Select(scenesFilesJoinTable.Col(sceneIDColumn)).
		Where(
			scenesFilesJoinTable.Col(fileIDColumn).Eq(fileID),
			scenesFilesJoinTable.Col("primary").Eq(1),
		)

	ret, err := qb.findBySubquery(ctx, sq)
	if err != nil {
		return nil, fmt.Errorf("getting scenes by primary file id %d: %w", fileID, err)
	}

	return ret, nil
}
2022-07-18 00:51:59 +00:00
// CountByFileID returns the number of scenes associated with the given file id.
func (qb *SceneStore) CountByFileID(ctx context.Context, fileID file.ID) (int, error) {
	q := dialect.Select(goqu.COUNT("*")).
		From(scenesFilesJoinTable).
		Where(scenesFilesJoinTable.Col(fileIDColumn).Eq(fileID))
	return count(ctx, q)
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) FindByFingerprints ( ctx context . Context , fp [ ] file . Fingerprint ) ( [ ] * models . Scene , error ) {
2022-08-01 01:01:29 +00:00
fingerprintTable := fingerprintTableMgr . table
2022-07-13 06:30:54 +00:00
var ex [ ] exp . Expression
for _ , v := range fp {
ex = append ( ex , goqu . And (
2022-08-01 01:01:29 +00:00
fingerprintTable . Col ( "type" ) . Eq ( v . Type ) ,
fingerprintTable . Col ( "fingerprint" ) . Eq ( v . Fingerprint ) ,
2022-07-13 06:30:54 +00:00
) )
}
2022-08-01 01:01:29 +00:00
sq := dialect . From ( scenesFilesJoinTable ) .
InnerJoin (
fingerprintTable ,
goqu . On ( fingerprintTable . Col ( fileIDColumn ) . Eq ( scenesFilesJoinTable . Col ( fileIDColumn ) ) ) ,
) .
Select ( scenesFilesJoinTable . Col ( sceneIDColumn ) ) . Where ( goqu . Or ( ex ... ) )
2022-07-13 06:30:54 +00:00
ret , err := qb . findBySubquery ( ctx , sq )
if err != nil {
return nil , fmt . Errorf ( "getting scenes by fingerprints: %w" , err )
}
return ret , nil
}
func ( qb * SceneStore ) FindByChecksum ( ctx context . Context , checksum string ) ( [ ] * models . Scene , error ) {
2022-08-01 01:01:29 +00:00
return qb . FindByFingerprints ( ctx , [ ] file . Fingerprint {
{
Type : file . FingerprintTypeMD5 ,
Fingerprint : checksum ,
} ,
} )
2022-07-13 06:30:54 +00:00
}
func ( qb * SceneStore ) FindByOSHash ( ctx context . Context , oshash string ) ( [ ] * models . Scene , error ) {
2022-08-01 01:01:29 +00:00
return qb . FindByFingerprints ( ctx , [ ] file . Fingerprint {
{
Type : file . FingerprintTypeOshash ,
Fingerprint : oshash ,
} ,
} )
2022-07-13 06:30:54 +00:00
}
func ( qb * SceneStore ) FindByPath ( ctx context . Context , p string ) ( [ ] * models . Scene , error ) {
2022-08-01 01:01:29 +00:00
filesTable := fileTableMgr . table
foldersTable := folderTableMgr . table
2022-07-13 06:30:54 +00:00
basename := filepath . Base ( p )
2022-08-26 04:56:46 +00:00
dir := filepath . Dir ( p )
2022-07-13 06:30:54 +00:00
// replace wildcards
basename = strings . ReplaceAll ( basename , "*" , "%" )
2022-08-26 04:56:46 +00:00
dir = strings . ReplaceAll ( dir , "*" , "%" )
2022-07-13 06:30:54 +00:00
2022-08-01 01:01:29 +00:00
sq := dialect . From ( scenesFilesJoinTable ) . InnerJoin (
filesTable ,
goqu . On ( filesTable . Col ( idColumn ) . Eq ( scenesFilesJoinTable . Col ( fileIDColumn ) ) ) ,
) . InnerJoin (
foldersTable ,
goqu . On ( foldersTable . Col ( idColumn ) . Eq ( filesTable . Col ( "parent_folder_id" ) ) ) ,
) . Select ( scenesFilesJoinTable . Col ( sceneIDColumn ) ) . Where (
foldersTable . Col ( "path" ) . Like ( dir ) ,
filesTable . Col ( "basename" ) . Like ( basename ) ,
2022-07-13 06:30:54 +00:00
)
ret , err := qb . findBySubquery ( ctx , sq )
if err != nil && ! errors . Is ( err , sql . ErrNoRows ) {
return nil , fmt . Errorf ( "getting scene by path %s: %w" , p , err )
}
return ret , nil
}
func ( qb * SceneStore ) findBySubquery ( ctx context . Context , sq * goqu . SelectDataset ) ( [ ] * models . Scene , error ) {
2022-08-11 06:14:57 +00:00
table := qb . table ( )
2022-07-13 06:30:54 +00:00
q := qb . selectDataset ( ) . Where (
table . Col ( idColumn ) . Eq (
sq ,
) ,
)
return qb . getMany ( ctx , q )
}
func ( qb * SceneStore ) FindByPerformerID ( ctx context . Context , performerID int ) ( [ ] * models . Scene , error ) {
sq := dialect . From ( scenesPerformersJoinTable ) . Select ( scenesPerformersJoinTable . Col ( sceneIDColumn ) ) . Where (
scenesPerformersJoinTable . Col ( performerIDColumn ) . Eq ( performerID ) ,
)
ret , err := qb . findBySubquery ( ctx , sq )
if err != nil {
return nil , fmt . Errorf ( "getting scenes for performer %d: %w" , performerID , err )
}
return ret , nil
2019-02-09 12:30:49 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) FindByGalleryID ( ctx context . Context , galleryID int ) ( [ ] * models . Scene , error ) {
sq := dialect . From ( galleriesScenesJoinTable ) . Select ( galleriesScenesJoinTable . Col ( sceneIDColumn ) ) . Where (
galleriesScenesJoinTable . Col ( galleryIDColumn ) . Eq ( galleryID ) ,
)
ret , err := qb . findBySubquery ( ctx , sq )
if err != nil {
return nil , fmt . Errorf ( "getting scenes for gallery %d: %w" , galleryID , err )
}
return ret , nil
2019-02-09 12:30:49 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) CountByPerformerID ( ctx context . Context , performerID int ) ( int , error ) {
joinTable := scenesPerformersJoinTable
q := dialect . Select ( goqu . COUNT ( "*" ) ) . From ( joinTable ) . Where ( joinTable . Col ( performerIDColumn ) . Eq ( performerID ) )
return count ( ctx , q )
2021-02-01 20:56:54 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) FindByMovieID ( ctx context . Context , movieID int ) ( [ ] * models . Scene , error ) {
sq := dialect . From ( scenesMoviesJoinTable ) . Select ( scenesMoviesJoinTable . Col ( sceneIDColumn ) ) . Where (
scenesMoviesJoinTable . Col ( movieIDColumn ) . Eq ( movieID ) ,
)
ret , err := qb . findBySubquery ( ctx , sq )
if err != nil {
return nil , fmt . Errorf ( "getting scenes for movie %d: %w" , movieID , err )
}
return ret , nil
2019-02-09 12:30:49 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) CountByMovieID ( ctx context . Context , movieID int ) ( int , error ) {
joinTable := scenesMoviesJoinTable
q := dialect . Select ( goqu . COUNT ( "*" ) ) . From ( joinTable ) . Where ( joinTable . Col ( movieIDColumn ) . Eq ( movieID ) )
return count ( ctx , q )
2020-03-10 03:28:15 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) Count ( ctx context . Context ) ( int , error ) {
q := dialect . Select ( goqu . COUNT ( "*" ) ) . From ( qb . table ( ) )
return count ( ctx , q )
2020-03-10 03:28:15 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) Size ( ctx context . Context ) ( float64 , error ) {
table := qb . table ( )
fileTable := fileTableMgr . table
q := dialect . Select (
goqu . SUM ( fileTableMgr . table . Col ( "size" ) ) ,
) . From ( table ) . InnerJoin (
scenesFilesJoinTable ,
goqu . On ( table . Col ( idColumn ) . Eq ( scenesFilesJoinTable . Col ( sceneIDColumn ) ) ) ,
) . InnerJoin (
fileTable ,
goqu . On ( scenesFilesJoinTable . Col ( fileIDColumn ) . Eq ( fileTable . Col ( idColumn ) ) ) ,
)
var ret float64
if err := querySimple ( ctx , q , & ret ) ; err != nil {
return 0 , err
}
return ret , nil
2019-02-11 20:36:10 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) Duration ( ctx context . Context ) ( float64 , error ) {
2022-08-08 04:55:39 +00:00
table := qb . table ( )
videoFileTable := videoFileTableMgr . table
q := dialect . Select (
goqu . SUM ( videoFileTable . Col ( "duration" ) ) ) . From ( table ) . InnerJoin (
scenesFilesJoinTable ,
goqu . On ( scenesFilesJoinTable . Col ( "scene_id" ) . Eq ( table . Col ( idColumn ) ) ) ,
) . InnerJoin (
videoFileTable ,
goqu . On ( videoFileTable . Col ( "file_id" ) . Eq ( scenesFilesJoinTable . Col ( "file_id" ) ) ) ,
)
2022-07-13 06:30:54 +00:00
var ret float64
if err := querySimple ( ctx , q , & ret ) ; err != nil {
return 0 , err
}
return ret , nil
2021-08-26 03:37:08 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) CountByStudioID ( ctx context . Context , studioID int ) ( int , error ) {
table := qb . table ( )
q := dialect . Select ( goqu . COUNT ( "*" ) ) . From ( table ) . Where ( table . Col ( studioIDColumn ) . Eq ( studioID ) )
return count ( ctx , q )
2020-04-03 02:44:17 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) CountByTagID ( ctx context . Context , tagID int ) ( int , error ) {
joinTable := scenesTagsJoinTable
q := dialect . Select ( goqu . COUNT ( "*" ) ) . From ( joinTable ) . Where ( joinTable . Col ( tagIDColumn ) . Eq ( tagID ) )
return count ( ctx , q )
2019-02-09 12:30:49 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) countMissingFingerprints ( ctx context . Context , fpType string ) ( int , error ) {
fpTable := fingerprintTableMgr . table . As ( "fingerprints_temp" )
2022-08-01 01:01:29 +00:00
q := dialect . From ( scenesFilesJoinTable ) . LeftJoin (
2022-07-13 06:30:54 +00:00
fpTable ,
goqu . On (
2022-08-01 01:01:29 +00:00
scenesFilesJoinTable . Col ( fileIDColumn ) . Eq ( fpTable . Col ( fileIDColumn ) ) ,
2022-07-13 06:30:54 +00:00
fpTable . Col ( "type" ) . Eq ( fpType ) ,
) ,
2022-08-01 01:01:29 +00:00
) . Select ( goqu . COUNT ( goqu . DISTINCT ( scenesFilesJoinTable . Col ( sceneIDColumn ) ) ) ) . Where ( fpTable . Col ( "fingerprint" ) . IsNull ( ) )
2022-07-13 06:30:54 +00:00
return count ( ctx , q )
2019-02-09 12:30:49 +00:00
}
2020-08-06 01:21:14 +00:00
// CountMissingChecksum returns the number of scenes missing a checksum value.
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) CountMissingChecksum ( ctx context . Context ) ( int , error ) {
return qb . countMissingFingerprints ( ctx , "md5" )
2020-08-06 01:21:14 +00:00
}
// CountMissingOSHash returns the number of scenes missing an oshash value.
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) CountMissingOSHash ( ctx context . Context ) ( int , error ) {
return qb . countMissingFingerprints ( ctx , "oshash" )
2020-08-06 01:21:14 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) Wall ( ctx context . Context , q * string ) ( [ ] * models . Scene , error ) {
2019-02-09 12:30:49 +00:00
s := ""
if q != nil {
s = * q
}
2022-07-13 06:30:54 +00:00
2022-08-11 06:14:57 +00:00
table := qb . table ( )
2022-07-13 06:30:54 +00:00
qq := qb . selectDataset ( ) . Prepared ( true ) . Where ( table . Col ( "details" ) . Like ( "%" + s + "%" ) ) . Order ( goqu . L ( "RANDOM()" ) . Asc ( ) ) . Limit ( 80 )
return qb . getMany ( ctx , qq )
2019-02-09 12:30:49 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) All ( ctx context . Context ) ( [ ] * models . Scene , error ) {
2022-08-11 06:14:57 +00:00
table := qb . table ( )
fileTable := fileTableMgr . table
folderTable := folderTableMgr . table
2022-09-01 07:54:34 +00:00
return qb . getMany ( ctx , qb . selectDataset ( ) . Order (
2022-08-11 06:14:57 +00:00
folderTable . Col ( "path" ) . Asc ( ) ,
fileTable . Col ( "basename" ) . Asc ( ) ,
table . Col ( "date" ) . Asc ( ) ,
2022-07-13 06:30:54 +00:00
) )
2019-02-09 12:30:49 +00:00
}
2021-03-02 00:27:36 +00:00
// illegalFilterCombination builds the error returned when two mutually
// exclusive sub-filter types appear in the same filter.
func illegalFilterCombination(first, second string) error {
	return fmt.Errorf("cannot have %s and %s in the same filter", first, second)
}
2021-02-01 20:57:56 +00:00
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) validateFilter ( sceneFilter * models . SceneFilterType ) error {
2021-03-02 00:27:36 +00:00
const and = "AND"
const or = "OR"
const not = "NOT"
2021-02-01 20:57:56 +00:00
2021-03-02 00:27:36 +00:00
if sceneFilter . And != nil {
if sceneFilter . Or != nil {
return illegalFilterCombination ( and , or )
}
if sceneFilter . Not != nil {
return illegalFilterCombination ( and , not )
2021-02-01 20:57:56 +00:00
}
2021-03-02 00:27:36 +00:00
return qb . validateFilter ( sceneFilter . And )
2021-02-01 20:57:56 +00:00
}
2021-03-02 00:27:36 +00:00
if sceneFilter . Or != nil {
if sceneFilter . Not != nil {
return illegalFilterCombination ( or , not )
}
2021-02-01 20:57:56 +00:00
2021-03-02 00:27:36 +00:00
return qb . validateFilter ( sceneFilter . Or )
2021-02-01 20:57:56 +00:00
}
2021-03-02 00:27:36 +00:00
if sceneFilter . Not != nil {
return qb . validateFilter ( sceneFilter . Not )
}
2021-02-01 20:57:56 +00:00
2021-03-02 00:27:36 +00:00
return nil
}
2021-02-01 20:57:56 +00:00
2022-07-13 06:30:54 +00:00
// makeFilter translates a SceneFilterType into a filterBuilder, registering a
// criterion handler for each supported filter field. And/Or/Not sub-filters
// are translated recursively and combined into the resulting filter.
func (qb *SceneStore) makeFilter(ctx context.Context, sceneFilter *models.SceneFilterType) *filterBuilder {
	query := &filterBuilder{}

	if sceneFilter.And != nil {
		query.and(qb.makeFilter(ctx, sceneFilter.And))
	}
	if sceneFilter.Or != nil {
		query.or(qb.makeFilter(ctx, sceneFilter.Or))
	}
	if sceneFilter.Not != nil {
		query.not(qb.makeFilter(ctx, sceneFilter.Not))
	}

	query.handleCriterion(ctx, pathCriterionHandler(sceneFilter.Path, "folders.path", "files.basename", qb.addFoldersTable))
	query.handleCriterion(ctx, sceneFileCountCriterionHandler(qb, sceneFilter.FileCount))
	query.handleCriterion(ctx, stringCriterionHandler(sceneFilter.Title, "scenes.title"))
	query.handleCriterion(ctx, stringCriterionHandler(sceneFilter.Details, "scenes.details"))

	// Fingerprint criteria join the fingerprints table (aliased per
	// fingerprint type) only when the criterion is actually set.
	query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) {
		if sceneFilter.Oshash != nil {
			qb.addSceneFilesTable(f)
			f.addLeftJoin(fingerprintTable, "fingerprints_oshash", "scenes_files.file_id = fingerprints_oshash.file_id AND fingerprints_oshash.type = 'oshash'")
		}

		stringCriterionHandler(sceneFilter.Oshash, "fingerprints_oshash.fingerprint")(ctx, f)
	}))

	query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) {
		if sceneFilter.Checksum != nil {
			qb.addSceneFilesTable(f)
			f.addLeftJoin(fingerprintTable, "fingerprints_md5", "scenes_files.file_id = fingerprints_md5.file_id AND fingerprints_md5.type = 'md5'")
		}

		stringCriterionHandler(sceneFilter.Checksum, "fingerprints_md5.fingerprint")(ctx, f)
	}))

	query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) {
		if sceneFilter.Phash != nil {
			qb.addSceneFilesTable(f)
			f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'")

			// phash is compared as an integer; the parse error is deliberately
			// ignored, leaving value at 0 for unparseable input
			value, _ := utils.StringToPhash(sceneFilter.Phash.Value)
			intCriterionHandler(&models.IntCriterionInput{
				Value:    int(value),
				Modifier: sceneFilter.Phash.Modifier,
			}, "fingerprints_phash.fingerprint", nil)(ctx, f)
		}
	}))

	query.handleCriterion(ctx, intCriterionHandler(sceneFilter.Rating, "scenes.rating", nil))
	query.handleCriterion(ctx, intCriterionHandler(sceneFilter.OCounter, "scenes.o_counter", nil))
	query.handleCriterion(ctx, boolCriterionHandler(sceneFilter.Organized, "scenes.organized", nil))

	query.handleCriterion(ctx, durationCriterionHandler(sceneFilter.Duration, "video_files.duration", qb.addVideoFilesTable))
	query.handleCriterion(ctx, resolutionCriterionHandler(sceneFilter.Resolution, "video_files.height", "video_files.width", qb.addVideoFilesTable))

	query.handleCriterion(ctx, hasMarkersCriterionHandler(sceneFilter.HasMarkers))
	query.handleCriterion(ctx, sceneIsMissingCriterionHandler(qb, sceneFilter.IsMissing))
	query.handleCriterion(ctx, stringCriterionHandler(sceneFilter.URL, "scenes.url"))

	query.handleCriterion(ctx, criterionHandlerFunc(func(ctx context.Context, f *filterBuilder) {
		if sceneFilter.StashID != nil {
			qb.stashIDRepository().join(f, "scene_stash_ids", "scenes.id")
			stringCriterionHandler(sceneFilter.StashID, "scene_stash_ids.stash_id")(ctx, f)
		}
	}))

	query.handleCriterion(ctx, boolCriterionHandler(sceneFilter.Interactive, "video_files.interactive", qb.addVideoFilesTable))
	query.handleCriterion(ctx, intCriterionHandler(sceneFilter.InteractiveSpeed, "video_files.interactive_speed", qb.addVideoFilesTable))

	query.handleCriterion(ctx, sceneCaptionCriterionHandler(qb, sceneFilter.Captions))

	query.handleCriterion(ctx, sceneTagsCriterionHandler(qb, sceneFilter.Tags))
	query.handleCriterion(ctx, sceneTagCountCriterionHandler(qb, sceneFilter.TagCount))
	query.handleCriterion(ctx, scenePerformersCriterionHandler(qb, sceneFilter.Performers))
	query.handleCriterion(ctx, scenePerformerCountCriterionHandler(qb, sceneFilter.PerformerCount))
	query.handleCriterion(ctx, sceneStudioCriterionHandler(qb, sceneFilter.Studios))
	query.handleCriterion(ctx, sceneMoviesCriterionHandler(qb, sceneFilter.Movies))
	query.handleCriterion(ctx, scenePerformerTagsCriterionHandler(qb, sceneFilter.PerformerTags))
	query.handleCriterion(ctx, scenePerformerFavoriteCriterionHandler(sceneFilter.PerformerFavorite))
	query.handleCriterion(ctx, scenePerformerAgeCriterionHandler(sceneFilter.PerformerAge))
	query.handleCriterion(ctx, scenePhashDuplicatedCriterionHandler(sceneFilter.Duplicated, qb.addSceneFilesTable))

	return query
}
2022-08-08 04:24:08 +00:00
// addSceneFilesTable left-joins the scenes_files join table onto scenes.
func (qb *SceneStore) addSceneFilesTable(f *filterBuilder) {
	f.addLeftJoin(scenesFilesTable, "", "scenes_files.scene_id = scenes.id")
}
// addFilesTable left-joins the files table onto scenes via scenes_files.
func (qb *SceneStore) addFilesTable(f *filterBuilder) {
	qb.addSceneFilesTable(f)
	f.addLeftJoin(fileTable, "", "scenes_files.file_id = files.id")
}
// addFoldersTable left-joins the folders table onto scenes via
// scenes_files and files.
func (qb *SceneStore) addFoldersTable(f *filterBuilder) {
	qb.addFilesTable(f)
	f.addLeftJoin(folderTable, "", "files.parent_folder_id = folders.id")
}
// addVideoFilesTable left-joins the video_files table onto scenes via
// scenes_files.
func (qb *SceneStore) addVideoFilesTable(f *filterBuilder) {
	qb.addSceneFilesTable(f)
	f.addLeftJoin(videoFileTable, "", "video_files.file_id = scenes_files.file_id")
}
2022-07-13 06:30:54 +00:00
// Query returns scenes matching the given filter and find-filter options,
// along with any requested aggregate fields (count, total duration/size).
// Nil filters are replaced by empty defaults.
func (qb *SceneStore) Query(ctx context.Context, options models.SceneQueryOptions) (*models.SceneQueryResult, error) {
	sceneFilter := options.SceneFilter
	findFilter := options.FindFilter
	if sceneFilter == nil {
		sceneFilter = &models.SceneFilterType{}
	}
	if findFilter == nil {
		findFilter = &models.FindFilterType{}
	}

	query := qb.newQuery()
	distinctIDs(&query, sceneTable)

	if q := findFilter.Q; q != nil && *q != "" {
		// free-text search joins the file, folder, fingerprint and marker
		// tables so their columns can be matched alongside scene columns
		query.addJoins(
			join{
				table:    scenesFilesTable,
				onClause: "scenes_files.scene_id = scenes.id",
			},
			join{
				table:    fileTable,
				onClause: "scenes_files.file_id = files.id",
			},
			join{
				table:    folderTable,
				onClause: "files.parent_folder_id = folders.id",
			},
			join{
				table:    fingerprintTable,
				onClause: "files_fingerprints.file_id = scenes_files.file_id",
			},
			join{
				table:    sceneMarkerTable,
				onClause: "scene_markers.scene_id = scenes.id",
			},
		)

		// full path is reconstructed in SQL as folder path + separator + basename
		filepathColumn := "folders.path || '" + string(filepath.Separator) + "' || files.basename"
		searchColumns := []string{"scenes.title", "scenes.details", filepathColumn, "files_fingerprints.fingerprint", "scene_markers.title"}
		query.parseQueryString(searchColumns, *q)
	}

	if err := qb.validateFilter(sceneFilter); err != nil {
		return nil, err
	}
	filter := qb.makeFilter(ctx, sceneFilter)

	query.addFilter(filter)

	qb.setSceneSort(&query, findFilter)
	query.sortAndPagination += getPagination(findFilter)

	// aggregate fields are computed before pagination-limited ID retrieval
	result, err := qb.queryGroupedFields(ctx, options, query)
	if err != nil {
		return nil, fmt.Errorf("error querying aggregate fields: %w", err)
	}

	idsResult, err := query.findIDs(ctx)
	if err != nil {
		return nil, fmt.Errorf("error finding IDs: %w", err)
	}

	result.IDs = idsResult
	return result, nil
}
2022-07-13 06:30:54 +00:00
// queryGroupedFields computes the aggregate portion of a scene query (count,
// total duration, total size) by wrapping the base query as a sub-query named
// "temp" and aggregating over it. If no aggregates are requested, an empty
// result is returned without touching the database.
func (qb *SceneStore) queryGroupedFields(ctx context.Context, options models.SceneQueryOptions, query queryBuilder) (*models.SceneQueryResult, error) {
	if !options.Count && !options.TotalDuration && !options.TotalSize {
		// nothing to do - return empty result
		return models.NewSceneQueryResult(qb), nil
	}

	aggregateQuery := qb.newQuery()

	if options.Count {
		aggregateQuery.addColumn("COUNT(temp.id) as total")
	}

	if options.TotalDuration {
		query.addJoins(
			join{
				table:    scenesFilesTable,
				onClause: "scenes_files.scene_id = scenes.id",
			},
			join{
				table:    videoFileTable,
				onClause: "scenes_files.file_id = video_files.file_id",
			},
		)
		// COALESCE so scenes without a video file contribute 0 to the sum
		query.addColumn("COALESCE(video_files.duration, 0) as duration")
		aggregateQuery.addColumn("SUM(temp.duration) as duration")
	}

	if options.TotalSize {
		query.addJoins(
			join{
				table:    scenesFilesTable,
				onClause: "scenes_files.scene_id = scenes.id",
			},
			join{
				table:    fileTable,
				onClause: "scenes_files.file_id = files.id",
			},
		)
		query.addColumn("COALESCE(files.size, 0) as size")
		aggregateQuery.addColumn("SUM(temp.size) as size")
	}

	// the inner query is embedded without sort/pagination so aggregates
	// cover the full result set
	const includeSortPagination = false
	aggregateQuery.from = fmt.Sprintf("(%s) as temp", query.toSQL(includeSortPagination))

	out := struct {
		Total    int
		Duration null.Float
		Size     null.Float
	}{}
	if err := qb.repository.queryStruct(ctx, aggregateQuery.toSQL(includeSortPagination), query.args, &out); err != nil {
		return nil, err
	}

	ret := models.NewSceneQueryResult(qb)
	ret.Count = out.Total
	// SUM over an empty set yields NULL; null.Float reads that as 0
	ret.TotalDuration = out.Duration.Float64
	ret.TotalSize = out.Size.Float64

	return ret, nil
}
2022-07-14 23:29:03 +00:00
// sceneFileCountCriterionHandler builds a criterion handler that matches
// scenes by the number of files associated with them via scenes_files.
func sceneFileCountCriterionHandler(qb *SceneStore, fileCount *models.IntCriterionInput) criterionHandlerFunc {
	builder := countCriterionHandlerBuilder{
		primaryTable: sceneTable,
		joinTable:    scenesFilesTable,
		primaryFK:    sceneIDColumn,
	}

	return builder.handler(fileCount)
}
2022-08-08 04:24:08 +00:00
func scenePhashDuplicatedCriterionHandler ( duplicatedFilter * models . PHashDuplicationCriterionInput , addJoinFn func ( f * filterBuilder ) ) criterionHandlerFunc {
2022-05-19 07:49:32 +00:00
return func ( ctx context . Context , f * filterBuilder ) {
2022-02-16 00:11:57 +00:00
// TODO: Wishlist item: Implement Distance matching
if duplicatedFilter != nil {
2022-08-08 04:24:08 +00:00
if addJoinFn != nil {
addJoinFn ( f )
}
2022-02-16 00:11:57 +00:00
var v string
if * duplicatedFilter . Duplicated {
v = ">"
} else {
v = "="
}
2022-08-08 04:24:08 +00:00
f . addInnerJoin ( "(SELECT file_id FROM files_fingerprints INNER JOIN (SELECT fingerprint FROM files_fingerprints WHERE type = 'phash' GROUP BY fingerprint HAVING COUNT (fingerprint) " + v + " 1) dupes on files_fingerprints.fingerprint = dupes.fingerprint)" , "scph" , "scenes_files.file_id = scph.file_id" )
2022-02-16 00:11:57 +00:00
}
}
}
2022-08-08 04:24:08 +00:00
func durationCriterionHandler ( durationFilter * models . IntCriterionInput , column string , addJoinFn func ( f * filterBuilder ) ) criterionHandlerFunc {
2022-05-19 07:49:32 +00:00
return func ( ctx context . Context , f * filterBuilder ) {
2021-03-02 00:27:36 +00:00
if durationFilter != nil {
2022-08-08 04:24:08 +00:00
if addJoinFn != nil {
addJoinFn ( f )
}
2021-08-12 00:24:16 +00:00
clause , args := getIntCriterionWhereClause ( "cast(" + column + " as int)" , * durationFilter )
f . addWhere ( clause , args ... )
2021-03-02 00:27:36 +00:00
}
}
}
2022-08-01 01:01:29 +00:00
func resolutionCriterionHandler ( resolution * models . ResolutionCriterionInput , heightColumn string , widthColumn string , addJoinFn func ( f * filterBuilder ) ) criterionHandlerFunc {
2022-05-19 07:49:32 +00:00
return func ( ctx context . Context , f * filterBuilder ) {
2021-08-02 03:22:39 +00:00
if resolution != nil && resolution . Value . IsValid ( ) {
2022-08-01 01:01:29 +00:00
if addJoinFn != nil {
addJoinFn ( f )
}
2021-08-02 03:22:39 +00:00
min := resolution . Value . GetMinResolution ( )
max := resolution . Value . GetMaxResolution ( )
2021-03-02 00:27:36 +00:00
widthHeight := fmt . Sprintf ( "MIN(%s, %s)" , widthColumn , heightColumn )
Enable gocritic (#1848)
* Don't capitalize local variables
ValidCodecs -> validCodecs
* Capitalize deprecation markers
A deprecated marker should be capitalized.
* Use re.MustCompile for static regexes
If the regex fails to compile, it's a programmer error, and should be
treated as such. The regex is entirely static.
* Simplify else-if constructions
Rewrite
else { if cond {}}
to
else if cond {}
* Use a switch statement to analyze formats
Break an if-else chain. While here, simplify code flow.
Also introduce a proper static error for unsupported image formats,
paving the way for being able to check against the error.
* Rewrite ifElse chains into switch statements
The "Effective Go" https://golang.org/doc/effective_go#switch document
mentions it is more idiomatic to write if-else chains as switches when
it is possible.
Find all the plain rewrite occurrences in the code base and rewrite.
In some cases, the if-else chains are replaced by a switch scrutinizer.
That is, the code sequence
if x == 1 {
..
} else if x == 2 {
..
} else if x == 3 {
...
}
can be rewritten into
switch x {
case 1:
..
case 2:
..
case 3:
..
}
which is clearer for the compiler: it can decide if the switch is
better served by a jump-table then a branch-chain.
* Rewrite switches, introduce static errors
Introduce two new static errors:
* `ErrNotImplmented`
* `ErrNotSupported`
And use these rather than forming new generative errors whenever the
code is called. Code can now test on the errors (since they are static
and the pointers to them wont change).
Also rewrite ifElse chains into switches in this part of the code base.
* Introduce a StashBoxError in configuration
Since all stashbox errors are the same, treat them as such in the code
base. While here, rewrite an ifElse chain.
In the future, it might be beneifical to refactor configuration errors
into one error which can handle missing fields, which context the error
occurs in and so on. But for now, try to get an overview of the error
categories by hoisting them into static errors.
* Get rid of an else-block in transaction handling
If we succesfully `recover()`, we then always `panic()`. This means the
rest of the code is not reachable, so we can avoid having an else-block
here.
It also solves an ifElse-chain style check in the code base.
* Use strings.ReplaceAll
Rewrite
strings.Replace(s, o, n, -1)
into
strings.ReplaceAll(s, o, n)
To make it consistent and clear that we are doing an all-replace in the
string rather than replacing parts of it. It's more of a nitpick since
there are no implementation differences: the stdlib implementation is
just to supply -1.
* Rewrite via gocritic's assignOp
Statements of the form
x = x + e
is rewritten into
x += e
where applicable.
* Formatting
* Review comments handled
Stash-box is a proper noun.
Rewrite a switch into an if-chain which returns on the first error
encountered.
* Use context.TODO() over context.Background()
Patch in the same vein as everything else: use the TODO() marker so we
can search for it later and link it into the context tree/tentacle once
it reaches down to this level in the code base.
* Tell the linter to ignore a section in manager_tasks.go
The section is less readable, so mark it with a nolint for now. Because
the rewrite enables a ifElseChain, also mark that as nolint for now.
* Use strings.ReplaceAll over strings.Replace
* Apply an ifElse rewrite
else { if .. { .. } } rewrite into else if { .. }
* Use switch-statements over ifElseChains
Rewrite chains of if-else into switch statements. Where applicable,
add an early nil-guard to simplify case analysis. Also, in
ScanTask's Start(..), invert the logic to outdent the whole block, and
help the reader: if it's not a scene, the function flow is now far more
local to the top of the function, and it's clear that the rest of the
function has to do with scene management.
* Enable gocritic on the code base.
Disable appendAssign for now since we aren't passing that check yet.
* Document the nolint additions
* Document StashBoxBatchPerformerTagInput
2021-10-18 03:12:40 +00:00
switch resolution . Modifier {
case models . CriterionModifierEquals :
2021-08-02 03:22:39 +00:00
f . addWhere ( fmt . Sprintf ( "%s BETWEEN %d AND %d" , widthHeight , min , max ) )
Enable gocritic (#1848)
* Don't capitalize local variables
ValidCodecs -> validCodecs
* Capitalize deprecation markers
A deprecated marker should be capitalized.
* Use re.MustCompile for static regexes
If the regex fails to compile, it's a programmer error, and should be
treated as such. The regex is entirely static.
* Simplify else-if constructions
Rewrite
else { if cond {}}
to
else if cond {}
* Use a switch statement to analyze formats
Break an if-else chain. While here, simplify code flow.
Also introduce a proper static error for unsupported image formats,
paving the way for being able to check against the error.
* Rewrite ifElse chains into switch statements
The "Effective Go" https://golang.org/doc/effective_go#switch document
mentions it is more idiomatic to write if-else chains as switches when
it is possible.
Find all the plain rewrite occurrences in the code base and rewrite.
In some cases, the if-else chains are replaced by a switch scrutinizer.
That is, the code sequence
if x == 1 {
..
} else if x == 2 {
..
} else if x == 3 {
...
}
can be rewritten into
switch x {
case 1:
..
case 2:
..
case 3:
..
}
which is clearer for the compiler: it can decide if the switch is
better served by a jump-table then a branch-chain.
* Rewrite switches, introduce static errors
Introduce two new static errors:
* `ErrNotImplmented`
* `ErrNotSupported`
And use these rather than forming new generative errors whenever the
code is called. Code can now test on the errors (since they are static
and the pointers to them wont change).
Also rewrite ifElse chains into switches in this part of the code base.
* Introduce a StashBoxError in configuration
Since all stashbox errors are the same, treat them as such in the code
base. While here, rewrite an ifElse chain.
In the future, it might be beneifical to refactor configuration errors
into one error which can handle missing fields, which context the error
occurs in and so on. But for now, try to get an overview of the error
categories by hoisting them into static errors.
* Get rid of an else-block in transaction handling
If we succesfully `recover()`, we then always `panic()`. This means the
rest of the code is not reachable, so we can avoid having an else-block
here.
It also solves an ifElse-chain style check in the code base.
* Use strings.ReplaceAll
Rewrite
strings.Replace(s, o, n, -1)
into
strings.ReplaceAll(s, o, n)
To make it consistent and clear that we are doing an all-replace in the
string rather than replacing parts of it. It's more of a nitpick since
there are no implementation differences: the stdlib implementation is
just to supply -1.
* Rewrite via gocritic's assignOp
Statements of the form
x = x + e
is rewritten into
x += e
where applicable.
* Formatting
* Review comments handled
Stash-box is a proper noun.
Rewrite a switch into an if-chain which returns on the first error
encountered.
* Use context.TODO() over context.Background()
Patch in the same vein as everything else: use the TODO() marker so we
can search for it later and link it into the context tree/tentacle once
it reaches down to this level in the code base.
* Tell the linter to ignore a section in manager_tasks.go
The section is less readable, so mark it with a nolint for now. Because
the rewrite enables a ifElseChain, also mark that as nolint for now.
* Use strings.ReplaceAll over strings.Replace
* Apply an ifElse rewrite
else { if .. { .. } } rewrite into else if { .. }
* Use switch-statements over ifElseChains
Rewrite chains of if-else into switch statements. Where applicable,
add an early nil-guard to simplify case analysis. Also, in
ScanTask's Start(..), invert the logic to outdent the whole block, and
help the reader: if it's not a scene, the function flow is now far more
local to the top of the function, and it's clear that the rest of the
function has to do with scene management.
* Enable gocritic on the code base.
Disable appendAssign for now since we aren't passing that check yet.
* Document the nolint additions
* Document StashBoxBatchPerformerTagInput
2021-10-18 03:12:40 +00:00
case models . CriterionModifierNotEquals :
2021-08-02 03:22:39 +00:00
f . addWhere ( fmt . Sprintf ( "%s NOT BETWEEN %d AND %d" , widthHeight , min , max ) )
Enable gocritic (#1848)
* Don't capitalize local variables
ValidCodecs -> validCodecs
* Capitalize deprecation markers
A deprecated marker should be capitalized.
* Use re.MustCompile for static regexes
If the regex fails to compile, it's a programmer error, and should be
treated as such. The regex is entirely static.
* Simplify else-if constructions
Rewrite
else { if cond {}}
to
else if cond {}
* Use a switch statement to analyze formats
Break an if-else chain. While here, simplify code flow.
Also introduce a proper static error for unsupported image formats,
paving the way for being able to check against the error.
* Rewrite ifElse chains into switch statements
The "Effective Go" https://golang.org/doc/effective_go#switch document
mentions it is more idiomatic to write if-else chains as switches when
it is possible.
Find all the plain rewrite occurrences in the code base and rewrite.
In some cases, the if-else chains are replaced by a switch scrutinizer.
That is, the code sequence
if x == 1 {
..
} else if x == 2 {
..
} else if x == 3 {
...
}
can be rewritten into
switch x {
case 1:
..
case 2:
..
case 3:
..
}
which is clearer for the compiler: it can decide if the switch is
better served by a jump-table then a branch-chain.
* Rewrite switches, introduce static errors
Introduce two new static errors:
* `ErrNotImplmented`
* `ErrNotSupported`
And use these rather than forming new generative errors whenever the
code is called. Code can now test on the errors (since they are static
and the pointers to them wont change).
Also rewrite ifElse chains into switches in this part of the code base.
* Introduce a StashBoxError in configuration
Since all stashbox errors are the same, treat them as such in the code
base. While here, rewrite an ifElse chain.
In the future, it might be beneifical to refactor configuration errors
into one error which can handle missing fields, which context the error
occurs in and so on. But for now, try to get an overview of the error
categories by hoisting them into static errors.
* Get rid of an else-block in transaction handling
If we succesfully `recover()`, we then always `panic()`. This means the
rest of the code is not reachable, so we can avoid having an else-block
here.
It also solves an ifElse-chain style check in the code base.
* Use strings.ReplaceAll
Rewrite
strings.Replace(s, o, n, -1)
into
strings.ReplaceAll(s, o, n)
To make it consistent and clear that we are doing an all-replace in the
string rather than replacing parts of it. It's more of a nitpick since
there are no implementation differences: the stdlib implementation is
just to supply -1.
* Rewrite via gocritic's assignOp
Statements of the form
x = x + e
is rewritten into
x += e
where applicable.
* Formatting
* Review comments handled
Stash-box is a proper noun.
Rewrite a switch into an if-chain which returns on the first error
encountered.
* Use context.TODO() over context.Background()
Patch in the same vein as everything else: use the TODO() marker so we
can search for it later and link it into the context tree/tentacle once
it reaches down to this level in the code base.
* Tell the linter to ignore a section in manager_tasks.go
The section is less readable, so mark it with a nolint for now. Because
the rewrite enables a ifElseChain, also mark that as nolint for now.
* Use strings.ReplaceAll over strings.Replace
* Apply an ifElse rewrite
else { if .. { .. } } rewrite into else if { .. }
* Use switch-statements over ifElseChains
Rewrite chains of if-else into switch statements. Where applicable,
add an early nil-guard to simplify case analysis. Also, in
ScanTask's Start(..), invert the logic to outdent the whole block, and
help the reader: if it's not a scene, the function flow is now far more
local to the top of the function, and it's clear that the rest of the
function has to do with scene management.
* Enable gocritic on the code base.
Disable appendAssign for now since we aren't passing that check yet.
* Document the nolint additions
* Document StashBoxBatchPerformerTagInput
2021-10-18 03:12:40 +00:00
case models . CriterionModifierLessThan :
2021-08-02 03:22:39 +00:00
f . addWhere ( fmt . Sprintf ( "%s < %d" , widthHeight , min ) )
Enable gocritic (#1848)
* Don't capitalize local variables
ValidCodecs -> validCodecs
* Capitalize deprecation markers
A deprecated marker should be capitalized.
* Use re.MustCompile for static regexes
If the regex fails to compile, it's a programmer error, and should be
treated as such. The regex is entirely static.
* Simplify else-if constructions
Rewrite
else { if cond {}}
to
else if cond {}
* Use a switch statement to analyze formats
Break an if-else chain. While here, simplify code flow.
Also introduce a proper static error for unsupported image formats,
paving the way for being able to check against the error.
* Rewrite ifElse chains into switch statements
The "Effective Go" https://golang.org/doc/effective_go#switch document
mentions it is more idiomatic to write if-else chains as switches when
it is possible.
Find all the plain rewrite occurrences in the code base and rewrite.
In some cases, the if-else chains are replaced by a switch scrutinizer.
That is, the code sequence
if x == 1 {
..
} else if x == 2 {
..
} else if x == 3 {
...
}
can be rewritten into
switch x {
case 1:
..
case 2:
..
case 3:
..
}
which is clearer for the compiler: it can decide if the switch is
better served by a jump-table then a branch-chain.
* Rewrite switches, introduce static errors
Introduce two new static errors:
* `ErrNotImplmented`
* `ErrNotSupported`
And use these rather than forming new generative errors whenever the
code is called. Code can now test on the errors (since they are static
and the pointers to them wont change).
Also rewrite ifElse chains into switches in this part of the code base.
* Introduce a StashBoxError in configuration
Since all stashbox errors are the same, treat them as such in the code
base. While here, rewrite an ifElse chain.
In the future, it might be beneifical to refactor configuration errors
into one error which can handle missing fields, which context the error
occurs in and so on. But for now, try to get an overview of the error
categories by hoisting them into static errors.
* Get rid of an else-block in transaction handling
If we succesfully `recover()`, we then always `panic()`. This means the
rest of the code is not reachable, so we can avoid having an else-block
here.
It also solves an ifElse-chain style check in the code base.
* Use strings.ReplaceAll
Rewrite
strings.Replace(s, o, n, -1)
into
strings.ReplaceAll(s, o, n)
To make it consistent and clear that we are doing an all-replace in the
string rather than replacing parts of it. It's more of a nitpick since
there are no implementation differences: the stdlib implementation is
just to supply -1.
* Rewrite via gocritic's assignOp
Statements of the form
x = x + e
is rewritten into
x += e
where applicable.
* Formatting
* Review comments handled
Stash-box is a proper noun.
Rewrite a switch into an if-chain which returns on the first error
encountered.
* Use context.TODO() over context.Background()
Patch in the same vein as everything else: use the TODO() marker so we
can search for it later and link it into the context tree/tentacle once
it reaches down to this level in the code base.
* Tell the linter to ignore a section in manager_tasks.go
The section is less readable, so mark it with a nolint for now. Because
the rewrite enables a ifElseChain, also mark that as nolint for now.
* Use strings.ReplaceAll over strings.Replace
* Apply an ifElse rewrite
else { if .. { .. } } rewrite into else if { .. }
* Use switch-statements over ifElseChains
Rewrite chains of if-else into switch statements. Where applicable,
add an early nil-guard to simplify case analysis. Also, in
ScanTask's Start(..), invert the logic to outdent the whole block, and
help the reader: if it's not a scene, the function flow is now far more
local to the top of the function, and it's clear that the rest of the
function has to do with scene management.
* Enable gocritic on the code base.
Disable appendAssign for now since we aren't passing that check yet.
* Document the nolint additions
* Document StashBoxBatchPerformerTagInput
2021-10-18 03:12:40 +00:00
case models . CriterionModifierGreaterThan :
2021-08-02 03:22:39 +00:00
f . addWhere ( fmt . Sprintf ( "%s > %d" , widthHeight , max ) )
2021-03-02 00:27:36 +00:00
}
}
}
}
func hasMarkersCriterionHandler ( hasMarkers * string ) criterionHandlerFunc {
2022-05-19 07:49:32 +00:00
return func ( ctx context . Context , f * filterBuilder ) {
2021-03-02 00:27:36 +00:00
if hasMarkers != nil {
2021-12-06 01:30:40 +00:00
f . addLeftJoin ( "scene_markers" , "" , "scene_markers.scene_id = scenes.id" )
2021-03-02 00:27:36 +00:00
if * hasMarkers == "true" {
f . addHaving ( "count(scene_markers.scene_id) > 0" )
} else {
f . addWhere ( "scene_markers.id IS NULL" )
}
}
}
}
2022-07-13 06:30:54 +00:00
func sceneIsMissingCriterionHandler ( qb * SceneStore , isMissing * string ) criterionHandlerFunc {
2022-05-19 07:49:32 +00:00
return func ( ctx context . Context , f * filterBuilder ) {
2021-03-02 00:27:36 +00:00
if isMissing != nil && * isMissing != "" {
switch * isMissing {
case "galleries" :
qb . galleriesRepository ( ) . join ( f , "galleries_join" , "scenes.id" )
f . addWhere ( "galleries_join.scene_id IS NULL" )
case "studio" :
f . addWhere ( "scenes.studio_id IS NULL" )
case "movie" :
qb . moviesRepository ( ) . join ( f , "movies_join" , "scenes.id" )
f . addWhere ( "movies_join.scene_id IS NULL" )
case "performers" :
qb . performersRepository ( ) . join ( f , "performers_join" , "scenes.id" )
f . addWhere ( "performers_join.scene_id IS NULL" )
case "date" :
2022-03-28 19:45:46 +00:00
f . addWhere ( ` scenes.date IS NULL OR scenes.date IS "" OR scenes.date IS "0001-01-01" ` )
2021-03-02 00:27:36 +00:00
case "tags" :
qb . tagsRepository ( ) . join ( f , "tags_join" , "scenes.id" )
f . addWhere ( "tags_join.scene_id IS NULL" )
2021-06-21 05:48:28 +00:00
case "stash_id" :
qb . stashIDRepository ( ) . join ( f , "scene_stash_ids" , "scenes.id" )
f . addWhere ( "scene_stash_ids.scene_id IS NULL" )
2022-07-13 06:30:54 +00:00
case "phash" :
2022-08-08 04:24:08 +00:00
qb . addSceneFilesTable ( f )
f . addLeftJoin ( fingerprintTable , "fingerprints_phash" , "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'" )
2022-07-13 06:30:54 +00:00
f . addWhere ( "fingerprints_phash.fingerprint IS NULL" )
2021-03-02 00:27:36 +00:00
default :
f . addWhere ( "(scenes." + * isMissing + " IS NULL OR TRIM(scenes." + * isMissing + ") = '')" )
}
}
}
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) getMultiCriterionHandlerBuilder ( foreignTable , joinTable , foreignFK string , addJoinsFunc func ( f * filterBuilder ) ) multiCriterionHandlerBuilder {
2021-03-02 00:27:36 +00:00
return multiCriterionHandlerBuilder {
primaryTable : sceneTable ,
foreignTable : foreignTable ,
joinTable : joinTable ,
primaryFK : sceneIDColumn ,
foreignFK : foreignFK ,
addJoinsFunc : addJoinsFunc ,
}
}
2021-04-09 08:46:00 +00:00
2022-07-13 06:30:54 +00:00
func sceneCaptionCriterionHandler ( qb * SceneStore , captions * models . StringCriterionInput ) criterionHandlerFunc {
2022-05-06 01:59:28 +00:00
h := stringListCriterionHandlerBuilder {
2022-07-13 06:30:54 +00:00
joinTable : videoCaptionsTable ,
stringColumn : captionCodeColumn ,
2022-05-06 01:59:28 +00:00
addJoinTable : func ( f * filterBuilder ) {
2022-08-08 04:24:08 +00:00
qb . addSceneFilesTable ( f )
f . addLeftJoin ( videoCaptionsTable , "" , "video_captions.file_id = scenes_files.file_id" )
2022-05-06 01:59:28 +00:00
} ,
}
return h . handler ( captions )
}
2022-07-13 06:30:54 +00:00
func sceneTagsCriterionHandler ( qb * SceneStore , tags * models . HierarchicalMultiCriterionInput ) criterionHandlerFunc {
Tag hierarchy (#1519)
* Add migration script for tag relations table
* Expand hierarchical filter features
Expand the features of the hierarchical multi input filter with support
for using a relations table, which only has parent_id and child_id
columns, and support adding an additional intermediate table to join on,
for example for scenes and tags which are linked by the scenes_tags
table as well.
* Add hierarchical filtering for tags
* Add hierarchical tags support to scene markers
Refactor filtering of scene markers to filterBuilder and in the process
add support for hierarchical tags as well.
* List parent and child tags on tag details page
* Support setting parent and child tags
Add support for setting parent and child tags during tag creation and
tag updates.
* Validate no loops are created in tags hierarchy
* Update tag merging to support tag hierarcy
* Add unit tests for tags.EnsureUniqueHierarchy
* Fix applying recursive to with clause
The SQL `RECURSIVE` of a `WITH` clause only needs to be applied once,
imediately after the `WITH`. So this fixes the query building to do just
that, automatically applying the `RECURSIVE` keyword when any added with
clause is added as recursive.
* Rename hierarchical root id column
* Rewrite hierarchical filtering for performance
Completely rewrite the hierarchical filtering to optimize for
performance. Doing the recursive query in combination with a complex
query seems to break SQLite optimizing some things which means that the
recursive part might be 2,5 second slower than adding a static
`VALUES()` list. This is mostly noticable in case of the tag hierarchy
where setting an exclusion with any depth (or depth: all) being applied
has this performance impact of 2,5 second. "Include" also suffered this
issue, but some rewritten query by joining in the *_tags table in one
pass and applying a `WHERE x IS NOT NULL` filter did seem to optimize
that case. But that optimization isn't applied to the `IS NULL` filter
of "exclude". Running a simple query beforehand to get all (recursive)
items and then applying them to the query doesn't have this performance
penalty.
* Remove UI references to child studios and tags
* Add parents to tag export
* Support importing of parent relationship for tags
* Assign stable ids to parent / child badges
* Silence Apollo warning on parents/children fields on tags
Silence warning triggered by Apollo GraphQL by explicitly instructing it
to use the incoming parents/children values. By default it already does
this, but it triggers a warning as it might be unintended that it uses
the incoming values (instead of for example merging both arrays).
Setting merge to false still applies the same behaviour (use only
incoming values) but silences the warning as it's explicitly configured
to work like this.
* Rework detecting unique tag hierarchy
Completely rework the unique tag hierarchy to detect invalid hierarchies
for which a tag is "added in the middle". So when there are tags A <- B
and A <- C, you could previously edit tag B and add tag C as a sub tag
without it being noticed as parent A being applied twice (to tag C).
While afterwards saving tag C would fail as tag A was applied as parent
twice. The updated code correctly detects this scenario as well.
Furthermore the error messaging has been reworked a bit and the message
now mentions both the direct parent / sub tag as well as the tag which
would results in the error. So in aboves example it would now show the
message that tag C can't be applied because tag A already is a parent.
* Update relations on cached tags when needed
Update the relations on cached tags when a tag is created / updated /
deleted so these always reflect the correct state. Otherwise (re)opening
a tag might still show the old relations untill the page is fully
reloaded or the list is navigated. But this obviously is strange when
you for example have tag A, create or update tag B to have a relation to
tag A, and from tags B page click through to tag A and it doesn't show
that it is linked to tag B.
2021-09-09 04:58:43 +00:00
h := joinedHierarchicalMultiCriterionHandlerBuilder {
tx : qb . tx ,
primaryTable : sceneTable ,
foreignTable : tagTable ,
foreignFK : "tag_id" ,
relationsTable : "tags_relations" ,
joinAs : "scene_tag" ,
joinTable : scenesTagsTable ,
primaryFK : sceneIDColumn ,
2021-03-02 00:27:36 +00:00
}
return h . handler ( tags )
}
2022-07-13 06:30:54 +00:00
func sceneTagCountCriterionHandler ( qb * SceneStore , tagCount * models . IntCriterionInput ) criterionHandlerFunc {
2021-04-09 08:46:00 +00:00
h := countCriterionHandlerBuilder {
primaryTable : sceneTable ,
joinTable : scenesTagsTable ,
primaryFK : sceneIDColumn ,
}
return h . handler ( tagCount )
}
2022-07-13 06:30:54 +00:00
func scenePerformersCriterionHandler ( qb * SceneStore , performers * models . MultiCriterionInput ) criterionHandlerFunc {
2021-05-09 09:25:57 +00:00
h := joinedMultiCriterionHandlerBuilder {
primaryTable : sceneTable ,
joinTable : performersScenesTable ,
joinAs : "performers_join" ,
primaryFK : sceneIDColumn ,
foreignFK : performerIDColumn ,
addJoinTable : func ( f * filterBuilder ) {
qb . performersRepository ( ) . join ( f , "performers_join" , "scenes.id" )
} ,
2021-03-02 00:27:36 +00:00
}
return h . handler ( performers )
}
2022-07-13 06:30:54 +00:00
func scenePerformerCountCriterionHandler ( qb * SceneStore , performerCount * models . IntCriterionInput ) criterionHandlerFunc {
2021-04-09 08:46:00 +00:00
h := countCriterionHandlerBuilder {
primaryTable : sceneTable ,
joinTable : performersScenesTable ,
primaryFK : sceneIDColumn ,
}
return h . handler ( performerCount )
}
2022-02-16 00:11:57 +00:00
func scenePerformerFavoriteCriterionHandler ( performerfavorite * bool ) criterionHandlerFunc {
2022-05-19 07:49:32 +00:00
return func ( ctx context . Context , f * filterBuilder ) {
2022-02-16 00:11:57 +00:00
if performerfavorite != nil {
f . addLeftJoin ( "performers_scenes" , "" , "scenes.id = performers_scenes.scene_id" )
if * performerfavorite {
// contains at least one favorite
f . addLeftJoin ( "performers" , "" , "performers.id = performers_scenes.performer_id" )
f . addWhere ( "performers.favorite = 1" )
} else {
// contains zero favorites
f . addLeftJoin ( ` ( SELECT performers_scenes . scene_id as id FROM performers_scenes
JOIN performers ON performers . id = performers_scenes . performer_id
GROUP BY performers_scenes . scene_id HAVING SUM ( performers . favorite ) = 0 ) ` , "nofaves" , "scenes.id = nofaves.id" )
f . addWhere ( "performers_scenes.scene_id IS NULL OR nofaves.id IS NOT NULL" )
}
}
}
}
func scenePerformerAgeCriterionHandler ( performerAge * models . IntCriterionInput ) criterionHandlerFunc {
2022-05-19 07:49:32 +00:00
return func ( ctx context . Context , f * filterBuilder ) {
2022-02-16 00:11:57 +00:00
if performerAge != nil {
f . addInnerJoin ( "performers_scenes" , "" , "scenes.id = performers_scenes.scene_id" )
f . addInnerJoin ( "performers" , "" , "performers_scenes.performer_id = performers.id" )
f . addWhere ( "scenes.date != '' AND performers.birthdate != ''" )
f . addWhere ( "scenes.date IS NOT NULL AND performers.birthdate IS NOT NULL" )
f . addWhere ( "scenes.date != '0001-01-01' AND performers.birthdate != '0001-01-01'" )
ageCalc := "cast(strftime('%Y.%m%d', scenes.date) - strftime('%Y.%m%d', performers.birthdate) as int)"
whereClause , args := getIntWhereClause ( ageCalc , performerAge . Modifier , performerAge . Value , performerAge . Value2 )
f . addWhere ( whereClause , args ... )
}
}
}
2022-07-13 06:30:54 +00:00
func sceneStudioCriterionHandler ( qb * SceneStore , studios * models . HierarchicalMultiCriterionInput ) criterionHandlerFunc {
2021-06-03 10:52:19 +00:00
h := hierarchicalMultiCriterionHandlerBuilder {
Tag hierarchy (#1519)
* Add migration script for tag relations table
* Expand hierarchical filter features
Expand the features of the hierarchical multi input filter with support
for using a relations table, which only has parent_id and child_id
columns, and support adding an additional intermediate table to join on,
for example for scenes and tags which are linked by the scenes_tags
table as well.
* Add hierarchical filtering for tags
* Add hierarchical tags support to scene markers
Refactor filtering of scene markers to filterBuilder and in the process
add support for hierarchical tags as well.
* List parent and child tags on tag details page
* Support setting parent and child tags
Add support for setting parent and child tags during tag creation and
tag updates.
* Validate no loops are created in tags hierarchy
* Update tag merging to support tag hierarcy
* Add unit tests for tags.EnsureUniqueHierarchy
* Fix applying recursive to with clause
The SQL `RECURSIVE` of a `WITH` clause only needs to be applied once,
imediately after the `WITH`. So this fixes the query building to do just
that, automatically applying the `RECURSIVE` keyword when any added with
clause is added as recursive.
* Rename hierarchical root id column
* Rewrite hierarchical filtering for performance
Completely rewrite the hierarchical filtering to optimize for
performance. Doing the recursive query in combination with a complex
query seems to break SQLite optimizing some things which means that the
recursive part might be 2,5 second slower than adding a static
`VALUES()` list. This is mostly noticable in case of the tag hierarchy
where setting an exclusion with any depth (or depth: all) being applied
has this performance impact of 2,5 second. "Include" also suffered this
issue, but some rewritten query by joining in the *_tags table in one
pass and applying a `WHERE x IS NOT NULL` filter did seem to optimize
that case. But that optimization isn't applied to the `IS NULL` filter
of "exclude". Running a simple query beforehand to get all (recursive)
items and then applying them to the query doesn't have this performance
penalty.
* Remove UI references to child studios and tags
* Add parents to tag export
* Support importing of parent relationship for tags
* Assign stable ids to parent / child badges
* Silence Apollo warning on parents/children fields on tags
Silence warning triggered by Apollo GraphQL by explicitly instructing it
to use the incoming parents/children values. By default it already does
this, but it triggers a warning as it might be unintended that it uses
the incoming values (instead of for example merging both arrays).
Setting merge to false still applies the same behaviour (use only
incoming values) but silences the warning as it's explicitly configured
to work like this.
* Rework detecting unique tag hierarchy
Completely rework the unique tag hierarchy to detect invalid hierarchies
for which a tag is "added in the middle". So when there are tags A <- B
and A <- C, you could previously edit tag B and add tag C as a sub tag
without it being noticed as parent A being applied twice (to tag C).
While afterwards saving tag C would fail as tag A was applied as parent
twice. The updated code correctly detects this scenario as well.
Furthermore the error messaging has been reworked a bit and the message
now mentions both the direct parent / sub tag as well as the tag which
would results in the error. So in aboves example it would now show the
message that tag C can't be applied because tag A already is a parent.
* Update relations on cached tags when needed
Update the relations on cached tags when a tag is created / updated /
deleted so these always reflect the correct state. Otherwise (re)opening
a tag might still show the old relations untill the page is fully
reloaded or the list is navigated. But this obviously is strange when
you for example have tag A, create or update tag B to have a relation to
tag A, and from tags B page click through to tag A and it doesn't show
that it is linked to tag B.
2021-09-09 04:58:43 +00:00
tx : qb . tx ,
2021-06-03 10:52:19 +00:00
primaryTable : sceneTable ,
foreignTable : studioTable ,
foreignFK : studioIDColumn ,
parentFK : "parent_id" ,
2021-03-02 00:27:36 +00:00
}
return h . handler ( studios )
}
2022-07-13 06:30:54 +00:00
func sceneMoviesCriterionHandler ( qb * SceneStore , movies * models . MultiCriterionInput ) criterionHandlerFunc {
2021-03-02 00:27:36 +00:00
addJoinsFunc := func ( f * filterBuilder ) {
2022-03-25 01:00:51 +00:00
qb . moviesRepository ( ) . join ( f , "" , "scenes.id" )
f . addLeftJoin ( "movies" , "" , "movies_scenes.movie_id = movies.id" )
2021-03-02 00:27:36 +00:00
}
h := qb . getMultiCriterionHandlerBuilder ( movieTable , moviesScenesTable , "movie_id" , addJoinsFunc )
return h . handler ( movies )
}
2022-07-13 06:30:54 +00:00
func scenePerformerTagsCriterionHandler ( qb * SceneStore , tags * models . HierarchicalMultiCriterionInput ) criterionHandlerFunc {
2022-05-19 07:49:32 +00:00
return func ( ctx context . Context , f * filterBuilder ) {
2021-11-06 22:34:33 +00:00
if tags != nil {
if tags . Modifier == models . CriterionModifierIsNull || tags . Modifier == models . CriterionModifierNotNull {
var notClause string
if tags . Modifier == models . CriterionModifierNotNull {
notClause = "NOT"
}
2021-12-06 01:30:40 +00:00
f . addLeftJoin ( "performers_scenes" , "" , "scenes.id = performers_scenes.scene_id" )
f . addLeftJoin ( "performers_tags" , "" , "performers_scenes.performer_id = performers_tags.performer_id" )
2021-11-06 22:34:33 +00:00
f . addWhere ( fmt . Sprintf ( "performers_tags.tag_id IS %s NULL" , notClause ) )
return
}
if len ( tags . Value ) == 0 {
return
}
2022-05-19 07:49:32 +00:00
valuesClause := getHierarchicalValues ( ctx , qb . tx , tags . Value , tagTable , "tags_relations" , "" , tags . Depth )
2021-03-10 01:25:51 +00:00
Tag hierarchy (#1519)
* Add migration script for tag relations table
* Expand hierarchical filter features
Expand the features of the hierarchical multi input filter with support
for using a relations table, which only has parent_id and child_id
columns, and support adding an additional intermediate table to join on,
for example for scenes and tags which are linked by the scenes_tags
table as well.
* Add hierarchical filtering for tags
* Add hierarchical tags support to scene markers
Refactor filtering of scene markers to filterBuilder and in the process
add support for hierarchical tags as well.
* List parent and child tags on tag details page
* Support setting parent and child tags
Add support for setting parent and child tags during tag creation and
tag updates.
* Validate no loops are created in tags hierarchy
* Update tag merging to support tag hierarcy
* Add unit tests for tags.EnsureUniqueHierarchy
* Fix applying recursive to with clause
The SQL `RECURSIVE` of a `WITH` clause only needs to be applied once,
imediately after the `WITH`. So this fixes the query building to do just
that, automatically applying the `RECURSIVE` keyword when any added with
clause is added as recursive.
* Rename hierarchical root id column
* Rewrite hierarchical filtering for performance
Completely rewrite the hierarchical filtering to optimize for
performance. Doing the recursive query in combination with a complex
query seems to break SQLite optimizing some things which means that the
recursive part might be 2,5 second slower than adding a static
`VALUES()` list. This is mostly noticable in case of the tag hierarchy
where setting an exclusion with any depth (or depth: all) being applied
has this performance impact of 2,5 second. "Include" also suffered this
issue, but some rewritten query by joining in the *_tags table in one
pass and applying a `WHERE x IS NOT NULL` filter did seem to optimize
that case. But that optimization isn't applied to the `IS NULL` filter
of "exclude". Running a simple query beforehand to get all (recursive)
items and then applying them to the query doesn't have this performance
penalty.
* Remove UI references to child studios and tags
* Add parents to tag export
* Support importing of parent relationship for tags
* Assign stable ids to parent / child badges
* Silence Apollo warning on parents/children fields on tags
Silence warning triggered by Apollo GraphQL by explicitly instructing it
to use the incoming parents/children values. By default it already does
this, but it triggers a warning as it might be unintended that it uses
the incoming values (instead of for example merging both arrays).
Setting merge to false still applies the same behaviour (use only
incoming values) but silences the warning as it's explicitly configured
to work like this.
* Rework detecting unique tag hierarchy
Completely rework the unique tag hierarchy to detect invalid hierarchies
for which a tag is "added in the middle". So when there are tags A <- B
and A <- C, you could previously edit tag B and add tag C as a sub tag
without it being noticed as parent A being applied twice (to tag C).
While afterwards saving tag C would fail as tag A was applied as parent
twice. The updated code correctly detects this scenario as well.
Furthermore the error messaging has been reworked a bit and the message
now mentions both the direct parent / sub tag as well as the tag which
would results in the error. So in aboves example it would now show the
message that tag C can't be applied because tag A already is a parent.
* Update relations on cached tags when needed
Update the relations on cached tags when a tag is created / updated /
deleted so these always reflect the correct state. Otherwise (re)opening
a tag might still show the old relations untill the page is fully
reloaded or the list is navigated. But this obviously is strange when
you for example have tag A, create or update tag B to have a relation to
tag A, and from tags B page click through to tag A and it doesn't show
that it is linked to tag B.
2021-09-09 04:58:43 +00:00
f . addWith ( ` performer_tags AS (
SELECT ps . scene_id , t . column1 AS root_tag_id FROM performers_scenes ps
INNER JOIN performers_tags pt ON pt . performer_id = ps . performer_id
INNER JOIN ( ` + valuesClause + ` ) t ON t . column2 = pt . tag_id
) ` )
2021-03-10 01:25:51 +00:00
2021-12-06 01:30:40 +00:00
f . addLeftJoin ( "performer_tags" , "" , "performer_tags.scene_id = scenes.id" )
Tag hierarchy (#1519)
* Add migration script for tag relations table
* Expand hierarchical filter features
Expand the features of the hierarchical multi input filter with support
for using a relations table, which only has parent_id and child_id
columns, and support adding an additional intermediate table to join on,
for example for scenes and tags which are linked by the scenes_tags
table as well.
* Add hierarchical filtering for tags
* Add hierarchical tags support to scene markers
Refactor filtering of scene markers to filterBuilder and in the process
add support for hierarchical tags as well.
* List parent and child tags on tag details page
* Support setting parent and child tags
Add support for setting parent and child tags during tag creation and
tag updates.
* Validate no loops are created in tags hierarchy
* Update tag merging to support tag hierarcy
* Add unit tests for tags.EnsureUniqueHierarchy
* Fix applying recursive to with clause
The SQL `RECURSIVE` of a `WITH` clause only needs to be applied once,
imediately after the `WITH`. So this fixes the query building to do just
that, automatically applying the `RECURSIVE` keyword when any added with
clause is added as recursive.
* Rename hierarchical root id column
* Rewrite hierarchical filtering for performance
Completely rewrite the hierarchical filtering to optimize for
performance. Doing the recursive query in combination with a complex
query seems to break SQLite optimizing some things which means that the
recursive part might be 2,5 second slower than adding a static
`VALUES()` list. This is mostly noticable in case of the tag hierarchy
where setting an exclusion with any depth (or depth: all) being applied
has this performance impact of 2,5 second. "Include" also suffered this
issue, but some rewritten query by joining in the *_tags table in one
pass and applying a `WHERE x IS NOT NULL` filter did seem to optimize
that case. But that optimization isn't applied to the `IS NULL` filter
of "exclude". Running a simple query beforehand to get all (recursive)
items and then applying them to the query doesn't have this performance
penalty.
* Remove UI references to child studios and tags
* Add parents to tag export
* Support importing of parent relationship for tags
* Assign stable ids to parent / child badges
* Silence Apollo warning on parents/children fields on tags
Silence warning triggered by Apollo GraphQL by explicitly instructing it
to use the incoming parents/children values. By default it already does
this, but it triggers a warning as it might be unintended that it uses
the incoming values (instead of for example merging both arrays).
Setting merge to false still applies the same behaviour (use only
incoming values) but silences the warning as it's explicitly configured
to work like this.
* Rework detecting unique tag hierarchy
Completely rework the unique tag hierarchy to detect invalid hierarchies
for which a tag is "added in the middle". So when there are tags A <- B
and A <- C, you could previously edit tag B and add tag C as a sub tag
without it being noticed as parent A being applied twice (to tag C).
While afterwards saving tag C would fail as tag A was applied as parent
twice. The updated code correctly detects this scenario as well.
Furthermore the error messaging has been reworked a bit and the message
now mentions both the direct parent / sub tag as well as the tag which
would results in the error. So in aboves example it would now show the
message that tag C can't be applied because tag A already is a parent.
* Update relations on cached tags when needed
Update the relations on cached tags when a tag is created / updated /
deleted so these always reflect the correct state. Otherwise (re)opening
a tag might still show the old relations untill the page is fully
reloaded or the list is navigated. But this obviously is strange when
you for example have tag A, create or update tag B to have a relation to
tag A, and from tags B page click through to tag A and it doesn't show
that it is linked to tag B.
2021-09-09 04:58:43 +00:00
addHierarchicalConditionClauses ( f , tags , "performer_tags" , "root_tag_id" )
2021-03-10 01:25:51 +00:00
}
}
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) setSceneSort ( query * queryBuilder , findFilter * models . FindFilterType ) {
2022-03-09 01:01:56 +00:00
if findFilter == nil || findFilter . Sort == nil || * findFilter . Sort == "" {
2021-04-09 08:46:00 +00:00
return
2019-02-09 12:30:49 +00:00
}
2019-02-14 22:53:32 +00:00
sort := findFilter . GetSort ( "title" )
2022-07-13 06:30:54 +00:00
2022-08-08 04:24:08 +00:00
addFileTable := func ( ) {
query . addJoins (
join {
table : scenesFilesTable ,
onClause : "scenes_files.scene_id = scenes.id" ,
} ,
join {
table : fileTable ,
onClause : "scenes_files.file_id = files.id" ,
} ,
)
}
addVideoFileTable := func ( ) {
addFileTable ( )
query . addJoins (
join {
table : videoFileTable ,
onClause : "video_files.file_id = scenes_files.file_id" ,
} ,
)
2022-07-13 06:30:54 +00:00
}
2022-09-25 02:07:55 +00:00
addFolderTable := func ( ) {
query . addJoins (
join {
table : folderTable ,
onClause : "files.parent_folder_id = folders.id" ,
} ,
)
}
2019-02-14 22:53:32 +00:00
direction := findFilter . GetDirection ( )
2021-04-09 08:46:00 +00:00
switch sort {
2021-04-22 02:22:51 +00:00
case "movie_scene_number" :
2021-05-27 23:01:03 +00:00
query . join ( moviesScenesTable , "movies_join" , "scenes.id = movies_join.scene_id" )
2021-04-22 02:22:51 +00:00
query . sortAndPagination += fmt . Sprintf ( " ORDER BY movies_join.scene_index %s" , getSortDirection ( direction ) )
2021-04-09 08:46:00 +00:00
case "tag_count" :
query . sortAndPagination += getCountSort ( sceneTable , scenesTagsTable , sceneIDColumn , direction )
case "performer_count" :
query . sortAndPagination += getCountSort ( sceneTable , performersScenesTable , sceneIDColumn , direction )
2022-07-14 23:29:03 +00:00
case "file_count" :
query . sortAndPagination += getCountSort ( sceneTable , scenesFilesTable , sceneIDColumn , direction )
2022-07-13 06:30:54 +00:00
case "path" :
// special handling for path
2022-08-08 04:24:08 +00:00
addFileTable ( )
2022-09-25 02:07:55 +00:00
addFolderTable ( )
2022-08-08 04:24:08 +00:00
query . sortAndPagination += fmt . Sprintf ( " ORDER BY folders.path %s, files.basename %[1]s" , direction )
2022-07-13 06:30:54 +00:00
case "perceptual_similarity" :
// special handling for phash
2022-08-08 04:24:08 +00:00
addFileTable ( )
query . addJoins (
join {
table : fingerprintTable ,
as : "fingerprints_phash" ,
onClause : "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'" ,
} ,
)
query . sortAndPagination += " ORDER BY fingerprints_phash.fingerprint " + direction + ", files.size DESC"
case "bitrate" :
sort = "bit_rate"
addVideoFileTable ( )
query . sortAndPagination += getSort ( sort , direction , videoFileTable )
case "file_mod_time" :
sort = "mod_time"
addFileTable ( )
query . sortAndPagination += getSort ( sort , direction , fileTable )
case "framerate" :
sort = "frame_rate"
addVideoFileTable ( )
query . sortAndPagination += getSort ( sort , direction , videoFileTable )
2022-08-24 06:37:20 +00:00
case "filesize" :
2022-08-08 04:24:08 +00:00
addFileTable ( )
query . sortAndPagination += getSort ( sort , direction , fileTable )
case "duration" :
addVideoFileTable ( )
query . sortAndPagination += getSort ( sort , direction , videoFileTable )
2022-08-24 06:37:20 +00:00
case "interactive" , "interactive_speed" :
addVideoFileTable ( )
query . sortAndPagination += getSort ( sort , direction , videoFileTable )
2022-09-25 02:07:55 +00:00
case "title" :
addFileTable ( )
addFolderTable ( )
query . sortAndPagination += " ORDER BY scenes.title COLLATE NATURAL_CS " + direction + ", folders.path " + direction + ", files.basename COLLATE NATURAL_CS " + direction
2021-04-09 08:46:00 +00:00
default :
2022-08-08 04:24:08 +00:00
query . sortAndPagination += getSort ( sort , direction , "scenes" )
2021-04-09 08:46:00 +00:00
}
2019-02-09 12:30:49 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) imageRepository ( ) * imageRepository {
2021-01-18 01:23:20 +00:00
return & imageRepository {
repository : repository {
tx : qb . tx ,
tableName : "scenes_cover" ,
idColumn : sceneIDColumn ,
} ,
imageColumn : "cover" ,
2019-02-09 12:30:49 +00:00
}
2021-01-18 01:23:20 +00:00
}
2019-02-09 12:30:49 +00:00
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) GetCover ( ctx context . Context , sceneID int ) ( [ ] byte , error ) {
2022-05-19 07:49:32 +00:00
return qb . imageRepository ( ) . get ( ctx , sceneID )
2021-01-18 01:23:20 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) UpdateCover ( ctx context . Context , sceneID int , image [ ] byte ) error {
2022-05-19 07:49:32 +00:00
return qb . imageRepository ( ) . replace ( ctx , sceneID , image )
2021-01-18 01:23:20 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) DestroyCover ( ctx context . Context , sceneID int ) error {
2022-05-19 07:49:32 +00:00
return qb . imageRepository ( ) . destroy ( ctx , [ ] int { sceneID } )
2021-01-18 01:23:20 +00:00
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) moviesRepository ( ) * repository {
2021-01-18 01:23:20 +00:00
return & repository {
tx : qb . tx ,
tableName : moviesScenesTable ,
idColumn : sceneIDColumn ,
2019-02-09 12:30:49 +00:00
}
2021-01-18 01:23:20 +00:00
}
2022-08-12 02:21:46 +00:00
// GetMovies returns the movie associations for the given scene. The result
// is never nil; it is empty when the scene has no movies.
func (qb *SceneStore) GetMovies(ctx context.Context, id int) (ret []models.MoviesScenes, err error) {
	ret = []models.MoviesScenes{}

	scanRow := func(rows *sqlx.Rows) error {
		var row moviesScenesRow
		if err := rows.StructScan(&row); err != nil {
			return err
		}
		ret = append(ret, row.resolve(id))
		return nil
	}

	if err := qb.moviesRepository().getAll(ctx, id, scanRow); err != nil {
		return nil, err
	}

	return ret, nil
}
2022-08-11 06:14:57 +00:00
// filesRepository returns the repository for the scenes_files join table.
func (qb *SceneStore) filesRepository() *filesRepository {
	r := filesRepository{
		repository: repository{
			tx:        qb.tx,
			tableName: scenesFilesTable,
			idColumn:  sceneIDColumn,
		},
	}
	return &r
}
2022-08-12 02:21:46 +00:00
// AddFileID associates the given file with the scene without making it the
// primary file.
func (qb *SceneStore) AddFileID(ctx context.Context, id int, fileID file.ID) error {
	// the added file is never promoted to primary
	const firstPrimary = false
	return scenesFilesTableMgr.insertJoins(ctx, id, firstPrimary, []file.ID{fileID})
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) performersRepository ( ) * joinRepository {
2021-01-18 01:23:20 +00:00
return & joinRepository {
repository : repository {
tx : qb . tx ,
tableName : performersScenesTable ,
idColumn : sceneIDColumn ,
} ,
fkColumn : performerIDColumn ,
2020-08-06 01:21:14 +00:00
}
2021-01-18 01:23:20 +00:00
}
2020-08-06 01:21:14 +00:00
2022-08-12 02:21:46 +00:00
// GetPerformerIDs returns the ids of the performers linked to the given scene.
func (qb *SceneStore) GetPerformerIDs(ctx context.Context, id int) ([]int, error) {
	return qb.performersRepository().getIDs(ctx, id)
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) tagsRepository ( ) * joinRepository {
2021-01-18 01:23:20 +00:00
return & joinRepository {
repository : repository {
tx : qb . tx ,
tableName : scenesTagsTable ,
idColumn : sceneIDColumn ,
} ,
2022-09-20 07:02:14 +00:00
fkColumn : tagIDColumn ,
foreignTable : tagTable ,
orderBy : "tags.name ASC" ,
2020-06-22 23:19:19 +00:00
}
2021-01-18 01:23:20 +00:00
}
2020-06-22 23:19:19 +00:00
2022-08-12 02:21:46 +00:00
// GetTagIDs returns the ids of the tags linked to the scene with the given
// id, ordered by tag name.
func (qb *SceneStore) GetTagIDs(ctx context.Context, id int) ([]int, error) {
	r := qb.tagsRepository()
	return r.getIDs(ctx, id)
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) galleriesRepository ( ) * joinRepository {
2021-02-01 20:56:54 +00:00
return & joinRepository {
repository : repository {
tx : qb . tx ,
tableName : scenesGalleriesTable ,
idColumn : sceneIDColumn ,
} ,
fkColumn : galleryIDColumn ,
}
}
2022-08-12 02:21:46 +00:00
// GetGalleryIDs returns the ids of the galleries linked to the scene with the
// given id.
func (qb *SceneStore) GetGalleryIDs(ctx context.Context, id int) ([]int, error) {
	r := qb.galleriesRepository()
	return r.getIDs(ctx, id)
}
// AddGalleryIDs links the given galleries to the scene with the given id.
func (qb *SceneStore) AddGalleryIDs(ctx context.Context, sceneID int, galleryIDs []int) error {
	mgr := scenesGalleriesTableMgr
	return mgr.addJoins(ctx, sceneID, galleryIDs)
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) stashIDRepository ( ) * stashIDRepository {
2021-01-18 01:23:20 +00:00
return & stashIDRepository {
repository {
tx : qb . tx ,
tableName : "scene_stash_ids" ,
idColumn : sceneIDColumn ,
} ,
2020-06-22 23:19:19 +00:00
}
}
2022-08-12 02:21:46 +00:00
// GetStashIDs returns the stash ids recorded for the scene with the given id.
func (qb *SceneStore) GetStashIDs(ctx context.Context, sceneID int) ([]models.StashID, error) {
	r := qb.stashIDRepository()
	return r.get(ctx, sceneID)
}
2022-07-13 06:30:54 +00:00
func ( qb * SceneStore ) FindDuplicates ( ctx context . Context , distance int ) ( [ ] [ ] * models . Scene , error ) {
2021-04-11 23:04:40 +00:00
var dupeIds [ ] [ ] int
if distance == 0 {
var ids [ ] string
2022-05-19 07:49:32 +00:00
if err := qb . tx . Select ( ctx , & ids , findExactDuplicateQuery ) ; err != nil {
2021-04-11 23:04:40 +00:00
return nil , err
}
for _ , id := range ids {
strIds := strings . Split ( id , "," )
var sceneIds [ ] int
for _ , strId := range strIds {
if intId , err := strconv . Atoi ( strId ) ; err == nil {
2022-07-13 06:30:54 +00:00
sceneIds = intslice . IntAppendUnique ( sceneIds , intId )
2021-04-11 23:04:40 +00:00
}
}
2022-07-13 06:30:54 +00:00
// filter out
if len ( sceneIds ) > 1 {
dupeIds = append ( dupeIds , sceneIds )
}
2021-04-11 23:04:40 +00:00
}
} else {
var hashes [ ] * utils . Phash
2022-05-19 07:49:32 +00:00
if err := qb . queryFunc ( ctx , findAllPhashesQuery , nil , false , func ( rows * sqlx . Rows ) error {
2021-04-11 23:04:40 +00:00
phash := utils . Phash {
Bucket : - 1 ,
}
if err := rows . StructScan ( & phash ) ; err != nil {
return err
}
hashes = append ( hashes , & phash )
return nil
} ) ; err != nil {
return nil , err
}
dupeIds = utils . FindDuplicates ( hashes , distance )
}
var duplicates [ ] [ ] * models . Scene
for _ , sceneIds := range dupeIds {
2022-05-19 07:49:32 +00:00
if scenes , err := qb . FindMany ( ctx , sceneIds ) ; err == nil {
2021-04-11 23:04:40 +00:00
duplicates = append ( duplicates , scenes )
}
}
return duplicates , nil
}