2021-01-18 01:23:20 +00:00
|
|
|
package sqlite
|
2019-02-09 12:30:49 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"database/sql"
|
Errorlint sweep + minor linter tweaks (#1796)
* Replace error assertions with Go 1.13 style
Use `errors.As(..)` over type assertions. This enables better use of
wrapped errors in the future, and lets us pass some errorlint checks
in the process.
The rewrite is entirely mechanical, and uses a standard idiom for
doing so.
* Use Go 1.13's errors.Is(..)
Rather than directly checking for error equality, use errors.Is(..).
This protects against error wrapping issues in the future.
Even though something like sql.ErrNoRows doesn't need the wrapping, do
so anyway, for the sake of consistency throughout the code base.
The change almost lets us pass the `errorlint` Go checker except for
a missing case in `js.go` which is to be handled separately; it isn't
mechanical, like these changes are.
* Remove goconst
goconst isn't a useful linter in many cases, because it's false positive
rate is high. It's 100% for the current code base.
* Avoid direct comparison of errors in recover()
Assert that we are catching an error from recover(). If we are,
check that the error caught matches errStop.
* Enable the "errorlint" checker
Configure the checker to avoid checking for errorf wraps. These are
often false positives since the suggestion is to blanket wrap errors
with %w, and that exposes the underlying API which you might not want
to do.
The other warnings are good however, and with the current patch stack,
the code base passes all these checks as well.
* Configure rowserrcheck
The project uses sqlx. Configure rowserrcheck to include said package.
* Mechanically rewrite a large set of errors
Mechanically search for errors that look like
fmt.Errorf("...%s", err.Error())
and rewrite those into
fmt.Errorf("...%v", err)
The `fmt` package is error-aware and knows how to call err.Error()
itself.
The rationale is that this is more idiomatic Go; it paves the
way for using error wrapping later with %w in some sites.
This patch only addresses the entirely mechanical rewriting caught by
a project-side search/replace. There are more individual sites not
addressed by this patch.
2021-10-12 03:03:08 +00:00
|
|
|
"errors"
|
2020-07-19 01:59:18 +00:00
|
|
|
"fmt"
|
2021-03-02 00:27:36 +00:00
|
|
|
"strconv"
|
2021-04-11 23:04:40 +00:00
|
|
|
"strings"
|
2019-08-15 07:32:57 +00:00
|
|
|
|
|
|
|
"github.com/jmoiron/sqlx"
|
2021-01-18 01:23:20 +00:00
|
|
|
"github.com/stashapp/stash/pkg/models"
|
2021-04-11 23:04:40 +00:00
|
|
|
"github.com/stashapp/stash/pkg/utils"
|
2019-02-09 12:30:49 +00:00
|
|
|
)
|
|
|
|
|
2020-05-11 05:19:11 +00:00
|
|
|
// Table and column names for scenes and the join tables relating
// scenes to other entities. Used throughout the scene query builder.
const sceneTable = "scenes"
const sceneIDColumn = "scene_id"
const performersScenesTable = "performers_scenes"
const scenesTagsTable = "scenes_tags"
const scenesGalleriesTable = "scenes_galleries"
const moviesScenesTable = "movies_scenes"
|
2020-05-11 05:19:11 +00:00
|
|
|
|
|
|
|
// scenesForPerformerQuery selects all scenes linked to a performer id.
var scenesForPerformerQuery = selectAll(sceneTable) + `
LEFT JOIN performers_scenes as performers_join on performers_join.scene_id = scenes.id
WHERE performers_join.performer_id = ?
GROUP BY scenes.id
`

// countScenesForPerformerQuery yields one row per scene for a performer;
// wrapped by buildCountQuery to produce a scene count.
var countScenesForPerformerQuery = `
SELECT performer_id FROM performers_scenes as performers_join
WHERE performer_id = ?
GROUP BY scene_id
`

// scenesForStudioQuery selects all scenes belonging to a studio id.
var scenesForStudioQuery = selectAll(sceneTable) + `
JOIN studios ON studios.id = scenes.studio_id
WHERE studios.id = ?
GROUP BY scenes.id
`

// scenesForMovieQuery selects all scenes linked to a movie id.
var scenesForMovieQuery = selectAll(sceneTable) + `
LEFT JOIN movies_scenes as movies_join on movies_join.scene_id = scenes.id
WHERE movies_join.movie_id = ?
GROUP BY scenes.id
`

// countScenesForTagQuery yields one row per scene for a tag id;
// wrapped by buildCountQuery to produce a scene count.
var countScenesForTagQuery = `
SELECT tag_id AS id FROM scenes_tags
WHERE scenes_tags.tag_id = ?
GROUP BY scenes_tags.scene_id
`

// scenesForGalleryQuery selects all scenes linked to a gallery id.
var scenesForGalleryQuery = selectAll(sceneTable) + `
LEFT JOIN scenes_galleries as galleries_join on galleries_join.scene_id = scenes.id
WHERE galleries_join.gallery_id = ?
GROUP BY scenes.id
`

// countScenesForMissingChecksumQuery yields one row per scene lacking a checksum.
var countScenesForMissingChecksumQuery = `
SELECT id FROM scenes
WHERE scenes.checksum is null
`

// countScenesForMissingOSHashQuery yields one row per scene lacking an oshash.
var countScenesForMissingOSHashQuery = `
SELECT id FROM scenes
WHERE scenes.oshash is null
`

// findExactDuplicateQuery groups scenes sharing an identical phash and
// returns the grouped ids as a comma-separated list, largest total size first.
var findExactDuplicateQuery = `
SELECT GROUP_CONCAT(id) as ids
FROM scenes
WHERE phash IS NOT NULL
GROUP BY phash
HAVING COUNT(phash) > 1
ORDER BY SUM(size) DESC;
`

// findAllPhashesQuery returns every scene id with its phash, largest file first.
var findAllPhashesQuery = `
SELECT id, phash
FROM scenes
WHERE phash IS NOT NULL
ORDER BY size DESC
`
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
// sceneQueryBuilder provides read/write access to the scenes table.
// It embeds repository for the generic CRUD helpers (get, insertObject,
// update, destroyExisting, ...).
type sceneQueryBuilder struct {
	repository
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
// NewSceneReaderWriter creates a scene query builder operating on the
// given database interface (transaction or connection).
func NewSceneReaderWriter(tx dbi) *sceneQueryBuilder {
	return &sceneQueryBuilder{
		repository{
			tx:        tx,
			tableName: sceneTable,
			idColumn:  idColumn,
		},
	}
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) Create(newObject models.Scene) (*models.Scene, error) {
|
|
|
|
var ret models.Scene
|
|
|
|
if err := qb.insertObject(newObject, &ret); err != nil {
|
2019-02-09 12:30:49 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
return &ret, nil
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) Update(updatedObject models.ScenePartial) (*models.Scene, error) {
|
|
|
|
const partial = true
|
|
|
|
if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil {
|
2020-09-20 08:36:02 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.find(updatedObject.ID)
|
2020-09-20 08:36:02 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) UpdateFull(updatedObject models.Scene) (*models.Scene, error) {
|
|
|
|
const partial = false
|
|
|
|
if err := qb.update(updatedObject.ID, updatedObject, partial); err != nil {
|
|
|
|
return nil, err
|
2020-11-04 23:26:51 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.find(updatedObject.ID)
|
2020-11-04 23:26:51 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) UpdateFileModTime(id int, modTime models.NullSQLiteTimestamp) error {
|
|
|
|
return qb.updateMap(id, map[string]interface{}{
|
|
|
|
"file_mod_time": modTime,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) IncrementOCounter(id int) (int, error) {
|
|
|
|
_, err := qb.tx.Exec(
|
2020-02-03 00:17:28 +00:00
|
|
|
`UPDATE scenes SET o_counter = o_counter + 1 WHERE scenes.id = ?`,
|
|
|
|
id,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
scene, err := qb.find(id)
|
2020-02-03 00:17:28 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return scene.OCounter, nil
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) DecrementOCounter(id int) (int, error) {
|
|
|
|
_, err := qb.tx.Exec(
|
2020-02-03 00:17:28 +00:00
|
|
|
`UPDATE scenes SET o_counter = o_counter - 1 WHERE scenes.id = ? and scenes.o_counter > 0`,
|
|
|
|
id,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
scene, err := qb.find(id)
|
2020-02-03 00:17:28 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return scene.OCounter, nil
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) ResetOCounter(id int) (int, error) {
|
|
|
|
_, err := qb.tx.Exec(
|
2020-02-03 00:17:28 +00:00
|
|
|
`UPDATE scenes SET o_counter = 0 WHERE scenes.id = ?`,
|
|
|
|
id,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
scene, err := qb.find(id)
|
2020-02-03 00:17:28 +00:00
|
|
|
if err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return scene.OCounter, nil
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) Destroy(id int) error {
|
|
|
|
// delete all related table rows
|
|
|
|
// TODO - this should be handled by a delete cascade
|
|
|
|
if err := qb.performersRepository().destroy([]int{id}); err != nil {
|
2020-04-01 01:07:43 +00:00
|
|
|
return err
|
|
|
|
}
|
2021-01-18 01:23:20 +00:00
|
|
|
|
|
|
|
// scene markers should be handled prior to calling destroy
|
|
|
|
// galleries should be handled prior to calling destroy
|
|
|
|
|
|
|
|
return qb.destroyExisting([]int{id})
|
2019-08-15 07:32:57 +00:00
|
|
|
}
|
2021-01-18 01:23:20 +00:00
|
|
|
|
|
|
|
// Find returns the scene with the given id, or nil (with a nil error)
// when no such scene exists.
func (qb *sceneQueryBuilder) Find(id int) (*models.Scene, error) {
	return qb.find(id)
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) FindMany(ids []int) ([]*models.Scene, error) {
|
|
|
|
var scenes []*models.Scene
|
2020-07-19 01:59:18 +00:00
|
|
|
for _, id := range ids {
|
|
|
|
scene, err := qb.Find(id)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if scene == nil {
|
|
|
|
return nil, fmt.Errorf("scene with id %d not found", id)
|
|
|
|
}
|
|
|
|
|
|
|
|
scenes = append(scenes, scene)
|
|
|
|
}
|
|
|
|
|
|
|
|
return scenes, nil
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) find(id int) (*models.Scene, error) {
|
|
|
|
var ret models.Scene
|
|
|
|
if err := qb.get(id, &ret); err != nil {
|
Errorlint sweep + minor linter tweaks (#1796)
* Replace error assertions with Go 1.13 style
Use `errors.As(..)` over type assertions. This enables better use of
wrapped errors in the future, and lets us pass some errorlint checks
in the process.
The rewrite is entirely mechanical, and uses a standard idiom for
doing so.
* Use Go 1.13's errors.Is(..)
Rather than directly checking for error equality, use errors.Is(..).
This protects against error wrapping issues in the future.
Even though something like sql.ErrNoRows doesn't need the wrapping, do
so anyway, for the sake of consistency throughout the code base.
The change almost lets us pass the `errorlint` Go checker except for
a missing case in `js.go` which is to be handled separately; it isn't
mechanical, like these changes are.
* Remove goconst
goconst isn't a useful linter in many cases, because it's false positive
rate is high. It's 100% for the current code base.
* Avoid direct comparison of errors in recover()
Assert that we are catching an error from recover(). If we are,
check that the error caught matches errStop.
* Enable the "errorlint" checker
Configure the checker to avoid checking for errorf wraps. These are
often false positives since the suggestion is to blanket wrap errors
with %w, and that exposes the underlying API which you might not want
to do.
The other warnings are good however, and with the current patch stack,
the code base passes all these checks as well.
* Configure rowserrcheck
The project uses sqlx. Configure rowserrcheck to include said package.
* Mechanically rewrite a large set of errors
Mechanically search for errors that look like
fmt.Errorf("...%s", err.Error())
and rewrite those into
fmt.Errorf("...%v", err)
The `fmt` package is error-aware and knows how to call err.Error()
itself.
The rationale is that this is more idiomatic Go; it paves the
way for using error wrapping later with %w in some sites.
This patch only addresses the entirely mechanical rewriting caught by
a project-side search/replace. There are more individual sites not
addressed by this patch.
2021-10-12 03:03:08 +00:00
|
|
|
if errors.Is(err, sql.ErrNoRows) {
|
2021-01-18 01:23:20 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &ret, nil
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) FindByChecksum(checksum string) (*models.Scene, error) {
|
2019-02-09 12:30:49 +00:00
|
|
|
query := "SELECT * FROM scenes WHERE checksum = ? LIMIT 1"
|
|
|
|
args := []interface{}{checksum}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.queryScene(query, args)
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) FindByOSHash(oshash string) (*models.Scene, error) {
|
2020-08-06 01:21:14 +00:00
|
|
|
query := "SELECT * FROM scenes WHERE oshash = ? LIMIT 1"
|
|
|
|
args := []interface{}{oshash}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.queryScene(query, args)
|
2020-08-06 01:21:14 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) FindByPath(path string) (*models.Scene, error) {
|
2020-05-11 05:19:11 +00:00
|
|
|
query := selectAll(sceneTable) + "WHERE path = ? LIMIT 1"
|
2019-02-09 12:30:49 +00:00
|
|
|
args := []interface{}{path}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.queryScene(query, args)
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) FindByPerformerID(performerID int) ([]*models.Scene, error) {
|
2019-02-09 12:30:49 +00:00
|
|
|
args := []interface{}{performerID}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.queryScenes(scenesForPerformerQuery, args)
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-02-01 20:56:54 +00:00
|
|
|
func (qb *sceneQueryBuilder) FindByGalleryID(galleryID int) ([]*models.Scene, error) {
|
|
|
|
args := []interface{}{galleryID}
|
|
|
|
return qb.queryScenes(scenesForGalleryQuery, args)
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) CountByPerformerID(performerID int) (int, error) {
|
2019-02-09 12:30:49 +00:00
|
|
|
args := []interface{}{performerID}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.runCountQuery(qb.buildCountQuery(countScenesForPerformerQuery), args)
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) FindByMovieID(movieID int) ([]*models.Scene, error) {
|
2020-03-10 03:28:15 +00:00
|
|
|
args := []interface{}{movieID}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.queryScenes(scenesForMovieQuery, args)
|
2020-03-10 03:28:15 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) CountByMovieID(movieID int) (int, error) {
|
2020-03-10 03:28:15 +00:00
|
|
|
args := []interface{}{movieID}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.runCountQuery(qb.buildCountQuery(scenesForMovieQuery), args)
|
2020-03-10 03:28:15 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
// Count returns the total number of scenes.
func (qb *sceneQueryBuilder) Count() (int, error) {
	return qb.runCountQuery(qb.buildCountQuery("SELECT scenes.id FROM scenes"), nil)
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) Size() (float64, error) {
|
|
|
|
return qb.runSumQuery("SELECT SUM(cast(size as double)) as sum FROM scenes", nil)
|
2021-08-26 03:37:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) Duration() (float64, error) {
|
|
|
|
return qb.runSumQuery("SELECT SUM(cast(duration as double)) as sum FROM scenes", nil)
|
2020-04-03 02:44:17 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) CountByStudioID(studioID int) (int, error) {
|
2019-02-09 12:30:49 +00:00
|
|
|
args := []interface{}{studioID}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.runCountQuery(qb.buildCountQuery(scenesForStudioQuery), args)
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) CountByTagID(tagID int) (int, error) {
|
2019-02-09 12:30:49 +00:00
|
|
|
args := []interface{}{tagID}
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.runCountQuery(qb.buildCountQuery(countScenesForTagQuery), args)
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2020-08-06 01:21:14 +00:00
|
|
|
// CountMissingChecksum returns the number of scenes missing a checksum value.
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) CountMissingChecksum() (int, error) {
|
|
|
|
return qb.runCountQuery(qb.buildCountQuery(countScenesForMissingChecksumQuery), []interface{}{})
|
2020-08-06 01:21:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// CountMissingOSHash returns the number of scenes missing an oshash value.
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) CountMissingOSHash() (int, error) {
|
|
|
|
return qb.runCountQuery(qb.buildCountQuery(countScenesForMissingOSHashQuery), []interface{}{})
|
2020-08-06 01:21:14 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) Wall(q *string) ([]*models.Scene, error) {
|
2019-02-09 12:30:49 +00:00
|
|
|
s := ""
|
|
|
|
if q != nil {
|
|
|
|
s = *q
|
|
|
|
}
|
2020-05-11 05:19:11 +00:00
|
|
|
query := selectAll(sceneTable) + "WHERE scenes.details LIKE '%" + s + "%' ORDER BY RANDOM() LIMIT 80"
|
2021-01-18 01:23:20 +00:00
|
|
|
return qb.queryScenes(query, nil)
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
// All returns every scene, ordered by the default scene sort.
func (qb *sceneQueryBuilder) All() ([]*models.Scene, error) {
	return qb.queryScenes(selectAll(sceneTable)+qb.getDefaultSceneSort(), nil)
}
|
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
// illegalFilterCombination builds the error reported when two
// mutually-exclusive sub-filter operators appear on the same filter level.
func illegalFilterCombination(type1, type2 string) error {
	const format = "cannot have %s and %s in the same filter"
	return fmt.Errorf(format, type1, type2)
}
|
2021-02-01 20:57:56 +00:00
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
func (qb *sceneQueryBuilder) validateFilter(sceneFilter *models.SceneFilterType) error {
|
|
|
|
const and = "AND"
|
|
|
|
const or = "OR"
|
|
|
|
const not = "NOT"
|
2021-02-01 20:57:56 +00:00
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
if sceneFilter.And != nil {
|
|
|
|
if sceneFilter.Or != nil {
|
|
|
|
return illegalFilterCombination(and, or)
|
|
|
|
}
|
|
|
|
if sceneFilter.Not != nil {
|
|
|
|
return illegalFilterCombination(and, not)
|
2021-02-01 20:57:56 +00:00
|
|
|
}
|
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
return qb.validateFilter(sceneFilter.And)
|
2021-02-01 20:57:56 +00:00
|
|
|
}
|
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
if sceneFilter.Or != nil {
|
|
|
|
if sceneFilter.Not != nil {
|
|
|
|
return illegalFilterCombination(or, not)
|
|
|
|
}
|
2021-02-01 20:57:56 +00:00
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
return qb.validateFilter(sceneFilter.Or)
|
2021-02-01 20:57:56 +00:00
|
|
|
}
|
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
if sceneFilter.Not != nil {
|
|
|
|
return qb.validateFilter(sceneFilter.Not)
|
|
|
|
}
|
2021-02-01 20:57:56 +00:00
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
return nil
|
|
|
|
}
|
2021-02-01 20:57:56 +00:00
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
// makeFilter translates a SceneFilterType into a filterBuilder by
// registering one criterion handler per filter field. Sub-filters
// (And/Or/Not) are combined recursively first.
func (qb *sceneQueryBuilder) makeFilter(sceneFilter *models.SceneFilterType) *filterBuilder {
	query := &filterBuilder{}

	// recursively fold in logical sub-filters
	if sceneFilter.And != nil {
		query.and(qb.makeFilter(sceneFilter.And))
	}
	if sceneFilter.Or != nil {
		query.or(qb.makeFilter(sceneFilter.Or))
	}
	if sceneFilter.Not != nil {
		query.not(qb.makeFilter(sceneFilter.Not))
	}

	// simple column criteria on the scenes table
	query.handleCriterion(stringCriterionHandler(sceneFilter.Path, "scenes.path"))
	query.handleCriterion(stringCriterionHandler(sceneFilter.Title, "scenes.title"))
	query.handleCriterion(stringCriterionHandler(sceneFilter.Details, "scenes.details"))
	query.handleCriterion(stringCriterionHandler(sceneFilter.Oshash, "scenes.oshash"))
	query.handleCriterion(stringCriterionHandler(sceneFilter.Checksum, "scenes.checksum"))
	query.handleCriterion(phashCriterionHandler(sceneFilter.Phash))
	query.handleCriterion(intCriterionHandler(sceneFilter.Rating, "scenes.rating"))
	query.handleCriterion(intCriterionHandler(sceneFilter.OCounter, "scenes.o_counter"))
	query.handleCriterion(boolCriterionHandler(sceneFilter.Organized, "scenes.organized"))
	query.handleCriterion(durationCriterionHandler(sceneFilter.Duration, "scenes.duration"))
	query.handleCriterion(resolutionCriterionHandler(sceneFilter.Resolution, "scenes.height", "scenes.width"))
	query.handleCriterion(hasMarkersCriterionHandler(sceneFilter.HasMarkers))
	query.handleCriterion(sceneIsMissingCriterionHandler(qb, sceneFilter.IsMissing))
	query.handleCriterion(stringCriterionHandler(sceneFilter.URL, "scenes.url"))

	// stash id requires a join that is only added when the filter is set
	query.handleCriterion(criterionHandlerFunc(func(f *filterBuilder) {
		if sceneFilter.StashID != nil {
			qb.stashIDRepository().join(f, "scene_stash_ids", "scenes.id")
			stringCriterionHandler(sceneFilter.StashID, "scene_stash_ids.stash_id")(f)
		}
	}))

	query.handleCriterion(boolCriterionHandler(sceneFilter.Interactive, "scenes.interactive"))

	// criteria on related entities (tags, performers, studios, movies)
	query.handleCriterion(sceneTagsCriterionHandler(qb, sceneFilter.Tags))
	query.handleCriterion(sceneTagCountCriterionHandler(qb, sceneFilter.TagCount))
	query.handleCriterion(scenePerformersCriterionHandler(qb, sceneFilter.Performers))
	query.handleCriterion(scenePerformerCountCriterionHandler(qb, sceneFilter.PerformerCount))
	query.handleCriterion(sceneStudioCriterionHandler(qb, sceneFilter.Studios))
	query.handleCriterion(sceneMoviesCriterionHandler(qb, sceneFilter.Movies))
	query.handleCriterion(scenePerformerTagsCriterionHandler(qb, sceneFilter.PerformerTags))

	return query
}
|
|
|
|
|
2021-10-25 00:40:13 +00:00
|
|
|
// Query finds the ids of scenes matching the given filters, plus any
// aggregate fields (count/duration/size) requested in options.
func (qb *sceneQueryBuilder) Query(options models.SceneQueryOptions) (*models.SceneQueryResult, error) {
	sceneFilter := options.SceneFilter
	findFilter := options.FindFilter

	// treat missing filters as empty filters
	if sceneFilter == nil {
		sceneFilter = &models.SceneFilterType{}
	}
	if findFilter == nil {
		findFilter = &models.FindFilterType{}
	}

	query := qb.newQuery()
	distinctIDs(&query, sceneTable)

	// free-text search; also matches against scene marker titles
	if q := findFilter.Q; q != nil && *q != "" {
		query.join("scene_markers", "", "scene_markers.scene_id = scenes.id")
		searchColumns := []string{"scenes.title", "scenes.details", "scenes.path", "scenes.oshash", "scenes.checksum", "scene_markers.title"}
		clause, thisArgs := getSearchBinding(searchColumns, *q, false)
		query.addWhere(clause)
		query.addArg(thisArgs...)
	}

	// reject contradictory And/Or/Not combinations before building SQL
	if err := qb.validateFilter(sceneFilter); err != nil {
		return nil, err
	}
	filter := qb.makeFilter(sceneFilter)

	query.addFilter(filter)

	qb.setSceneSort(&query, findFilter)
	query.sortAndPagination += getPagination(findFilter)

	// aggregates are computed over the filtered (unpaginated) set
	result, err := qb.queryGroupedFields(options, query)
	if err != nil {
		return nil, fmt.Errorf("error querying aggregate fields: %w", err)
	}

	idsResult, err := query.findIDs()
	if err != nil {
		return nil, fmt.Errorf("error finding IDs: %w", err)
	}

	result.IDs = idsResult
	return result, nil
}
|
|
|
|
|
|
|
|
// queryGroupedFields computes the aggregate fields requested in options
// (total count, summed duration, summed size) by wrapping the filtered
// scene query in an outer aggregating SELECT.
func (qb *sceneQueryBuilder) queryGroupedFields(options models.SceneQueryOptions, query queryBuilder) (*models.SceneQueryResult, error) {
	if !options.Count && !options.TotalDuration && !options.TotalSize {
		// nothing to do - return empty result
		return models.NewSceneQueryResult(qb), nil
	}

	aggregateQuery := qb.newQuery()

	if options.Count {
		aggregateQuery.addColumn("COUNT(temp.id) as total")
	}

	// NOTE: for duration/size the inner query must expose the column
	// so the outer SUM can see it
	if options.TotalDuration {
		query.addColumn("COALESCE(scenes.duration, 0) as duration")
		aggregateQuery.addColumn("COALESCE(SUM(temp.duration), 0) as duration")
	}

	if options.TotalSize {
		query.addColumn("COALESCE(scenes.size, 0) as size")
		aggregateQuery.addColumn("COALESCE(SUM(temp.size), 0) as size")
	}

	// sort/pagination is irrelevant for aggregates; drop it from the SQL
	const includeSortPagination = false
	aggregateQuery.from = fmt.Sprintf("(%s) as temp", query.toSQL(includeSortPagination))

	out := struct {
		Total    int
		Duration float64
		Size     float64
	}{}
	if err := qb.repository.queryStruct(aggregateQuery.toSQL(includeSortPagination), query.args, &out); err != nil {
		return nil, err
	}

	ret := models.NewSceneQueryResult(qb)
	ret.Count = out.Total
	ret.TotalDuration = out.Duration
	ret.TotalSize = out.Size
	return ret, nil
}
|
|
|
|
|
2021-06-22 23:10:20 +00:00
|
|
|
func phashCriterionHandler(phashFilter *models.StringCriterionInput) criterionHandlerFunc {
|
|
|
|
return func(f *filterBuilder) {
|
|
|
|
if phashFilter != nil {
|
|
|
|
// convert value to int from hex
|
|
|
|
// ignore errors
|
|
|
|
value, _ := utils.StringToPhash(phashFilter.Value)
|
|
|
|
|
|
|
|
if modifier := phashFilter.Modifier; phashFilter.Modifier.IsValid() {
|
|
|
|
switch modifier {
|
|
|
|
case models.CriterionModifierEquals:
|
|
|
|
f.addWhere("scenes.phash = ?", value)
|
|
|
|
case models.CriterionModifierNotEquals:
|
|
|
|
f.addWhere("scenes.phash != ?", value)
|
|
|
|
case models.CriterionModifierIsNull:
|
|
|
|
f.addWhere("scenes.phash IS NULL")
|
|
|
|
case models.CriterionModifierNotNull:
|
|
|
|
f.addWhere("scenes.phash IS NOT NULL")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
func durationCriterionHandler(durationFilter *models.IntCriterionInput, column string) criterionHandlerFunc {
|
|
|
|
return func(f *filterBuilder) {
|
|
|
|
if durationFilter != nil {
|
2021-08-12 00:24:16 +00:00
|
|
|
clause, args := getIntCriterionWhereClause("cast("+column+" as int)", *durationFilter)
|
|
|
|
f.addWhere(clause, args...)
|
2021-03-02 00:27:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-08-02 03:22:39 +00:00
|
|
|
func resolutionCriterionHandler(resolution *models.ResolutionCriterionInput, heightColumn string, widthColumn string) criterionHandlerFunc {
|
2021-03-02 00:27:36 +00:00
|
|
|
return func(f *filterBuilder) {
|
2021-08-02 03:22:39 +00:00
|
|
|
if resolution != nil && resolution.Value.IsValid() {
|
|
|
|
min := resolution.Value.GetMinResolution()
|
|
|
|
max := resolution.Value.GetMaxResolution()
|
2021-03-02 00:27:36 +00:00
|
|
|
|
|
|
|
widthHeight := fmt.Sprintf("MIN(%s, %s)", widthColumn, heightColumn)
|
|
|
|
|
Enable gocritic (#1848)
* Don't capitalize local variables
ValidCodecs -> validCodecs
* Capitalize deprecation markers
A deprecated marker should be capitalized.
* Use re.MustCompile for static regexes
If the regex fails to compile, it's a programmer error, and should be
treated as such. The regex is entirely static.
* Simplify else-if constructions
Rewrite
else { if cond {}}
to
else if cond {}
* Use a switch statement to analyze formats
Break an if-else chain. While here, simplify code flow.
Also introduce a proper static error for unsupported image formats,
paving the way for being able to check against the error.
* Rewrite ifElse chains into switch statements
The "Effective Go" https://golang.org/doc/effective_go#switch document
mentions it is more idiomatic to write if-else chains as switches when
it is possible.
Find all the plain rewrite occurrences in the code base and rewrite.
In some cases, the if-else chains are replaced by a switch scrutinizer.
That is, the code sequence
if x == 1 {
..
} else if x == 2 {
..
} else if x == 3 {
...
}
can be rewritten into
switch x {
case 1:
..
case 2:
..
case 3:
..
}
which is clearer for the compiler: it can decide if the switch is
better served by a jump-table then a branch-chain.
* Rewrite switches, introduce static errors
Introduce two new static errors:
* `ErrNotImplmented`
* `ErrNotSupported`
And use these rather than forming new generative errors whenever the
code is called. Code can now test on the errors (since they are static
and the pointers to them wont change).
Also rewrite ifElse chains into switches in this part of the code base.
* Introduce a StashBoxError in configuration
Since all stashbox errors are the same, treat them as such in the code
base. While here, rewrite an ifElse chain.
In the future, it might be beneifical to refactor configuration errors
into one error which can handle missing fields, which context the error
occurs in and so on. But for now, try to get an overview of the error
categories by hoisting them into static errors.
* Get rid of an else-block in transaction handling
If we succesfully `recover()`, we then always `panic()`. This means the
rest of the code is not reachable, so we can avoid having an else-block
here.
It also solves an ifElse-chain style check in the code base.
* Use strings.ReplaceAll
Rewrite
strings.Replace(s, o, n, -1)
into
strings.ReplaceAll(s, o, n)
To make it consistent and clear that we are doing an all-replace in the
string rather than replacing parts of it. It's more of a nitpick since
there are no implementation differences: the stdlib implementation is
just to supply -1.
* Rewrite via gocritic's assignOp
Statements of the form
x = x + e
is rewritten into
x += e
where applicable.
* Formatting
* Review comments handled
Stash-box is a proper noun.
Rewrite a switch into an if-chain which returns on the first error
encountered.
* Use context.TODO() over context.Background()
Patch in the same vein as everything else: use the TODO() marker so we
can search for it later and link it into the context tree/tentacle once
it reaches down to this level in the code base.
* Tell the linter to ignore a section in manager_tasks.go
The section is less readable, so mark it with a nolint for now. Because
the rewrite enables a ifElseChain, also mark that as nolint for now.
* Use strings.ReplaceAll over strings.Replace
* Apply an ifElse rewrite
else { if .. { .. } } rewrite into else if { .. }
* Use switch-statements over ifElseChains
Rewrite chains of if-else into switch statements. Where applicable,
add an early nil-guard to simplify case analysis. Also, in
ScanTask's Start(..), invert the logic to outdent the whole block, and
help the reader: if it's not a scene, the function flow is now far more
local to the top of the function, and it's clear that the rest of the
function has to do with scene management.
* Enable gocritic on the code base.
Disable appendAssign for now since we aren't passing that check yet.
* Document the nolint additions
* Document StashBoxBatchPerformerTagInput
2021-10-18 03:12:40 +00:00
|
|
|
switch resolution.Modifier {
|
|
|
|
case models.CriterionModifierEquals:
|
2021-08-02 03:22:39 +00:00
|
|
|
f.addWhere(fmt.Sprintf("%s BETWEEN %d AND %d", widthHeight, min, max))
|
Enable gocritic (#1848)
* Don't capitalize local variables
ValidCodecs -> validCodecs
* Capitalize deprecation markers
A deprecated marker should be capitalized.
* Use re.MustCompile for static regexes
If the regex fails to compile, it's a programmer error, and should be
treated as such. The regex is entirely static.
* Simplify else-if constructions
Rewrite
else { if cond {}}
to
else if cond {}
* Use a switch statement to analyze formats
Break an if-else chain. While here, simplify code flow.
Also introduce a proper static error for unsupported image formats,
paving the way for being able to check against the error.
* Rewrite ifElse chains into switch statements
The "Effective Go" https://golang.org/doc/effective_go#switch document
mentions it is more idiomatic to write if-else chains as switches when
it is possible.
Find all the plain rewrite occurrences in the code base and rewrite.
In some cases, the if-else chains are replaced by a switch scrutinizer.
That is, the code sequence
if x == 1 {
..
} else if x == 2 {
..
} else if x == 3 {
...
}
can be rewritten into
switch x {
case 1:
..
case 2:
..
case 3:
..
}
which is clearer for the compiler: it can decide if the switch is
better served by a jump-table then a branch-chain.
* Rewrite switches, introduce static errors
Introduce two new static errors:
* `ErrNotImplmented`
* `ErrNotSupported`
And use these rather than forming new generative errors whenever the
code is called. Code can now test on the errors (since they are static
and the pointers to them wont change).
Also rewrite ifElse chains into switches in this part of the code base.
* Introduce a StashBoxError in configuration
Since all stashbox errors are the same, treat them as such in the code
base. While here, rewrite an ifElse chain.
In the future, it might be beneifical to refactor configuration errors
into one error which can handle missing fields, which context the error
occurs in and so on. But for now, try to get an overview of the error
categories by hoisting them into static errors.
* Get rid of an else-block in transaction handling
If we succesfully `recover()`, we then always `panic()`. This means the
rest of the code is not reachable, so we can avoid having an else-block
here.
It also solves an ifElse-chain style check in the code base.
* Use strings.ReplaceAll
Rewrite
strings.Replace(s, o, n, -1)
into
strings.ReplaceAll(s, o, n)
To make it consistent and clear that we are doing an all-replace in the
string rather than replacing parts of it. It's more of a nitpick since
there are no implementation differences: the stdlib implementation is
just to supply -1.
* Rewrite via gocritic's assignOp
Statements of the form
x = x + e
is rewritten into
x += e
where applicable.
* Formatting
* Review comments handled
Stash-box is a proper noun.
Rewrite a switch into an if-chain which returns on the first error
encountered.
* Use context.TODO() over context.Background()
Patch in the same vein as everything else: use the TODO() marker so we
can search for it later and link it into the context tree/tentacle once
it reaches down to this level in the code base.
* Tell the linter to ignore a section in manager_tasks.go
The section is less readable, so mark it with a nolint for now. Because
the rewrite enables a ifElseChain, also mark that as nolint for now.
* Use strings.ReplaceAll over strings.Replace
* Apply an ifElse rewrite
else { if .. { .. } } rewrite into else if { .. }
* Use switch-statements over ifElseChains
Rewrite chains of if-else into switch statements. Where applicable,
add an early nil-guard to simplify case analysis. Also, in
ScanTask's Start(..), invert the logic to outdent the whole block, and
help the reader: if it's not a scene, the function flow is now far more
local to the top of the function, and it's clear that the rest of the
function has to do with scene management.
* Enable gocritic on the code base.
Disable appendAssign for now since we aren't passing that check yet.
* Document the nolint additions
* Document StashBoxBatchPerformerTagInput
2021-10-18 03:12:40 +00:00
|
|
|
case models.CriterionModifierNotEquals:
|
2021-08-02 03:22:39 +00:00
|
|
|
f.addWhere(fmt.Sprintf("%s NOT BETWEEN %d AND %d", widthHeight, min, max))
|
Enable gocritic (#1848)
* Don't capitalize local variables
ValidCodecs -> validCodecs
* Capitalize deprecation markers
A deprecated marker should be capitalized.
* Use re.MustCompile for static regexes
If the regex fails to compile, it's a programmer error, and should be
treated as such. The regex is entirely static.
* Simplify else-if constructions
Rewrite
else { if cond {}}
to
else if cond {}
* Use a switch statement to analyze formats
Break an if-else chain. While here, simplify code flow.
Also introduce a proper static error for unsupported image formats,
paving the way for being able to check against the error.
* Rewrite ifElse chains into switch statements
The "Effective Go" https://golang.org/doc/effective_go#switch document
mentions it is more idiomatic to write if-else chains as switches when
it is possible.
Find all the plain rewrite occurrences in the code base and rewrite.
In some cases, the if-else chains are replaced by a switch scrutinizer.
That is, the code sequence
if x == 1 {
..
} else if x == 2 {
..
} else if x == 3 {
...
}
can be rewritten into
switch x {
case 1:
..
case 2:
..
case 3:
..
}
which is clearer for the compiler: it can decide if the switch is
better served by a jump-table then a branch-chain.
* Rewrite switches, introduce static errors
Introduce two new static errors:
* `ErrNotImplmented`
* `ErrNotSupported`
And use these rather than forming new generative errors whenever the
code is called. Code can now test on the errors (since they are static
and the pointers to them wont change).
Also rewrite ifElse chains into switches in this part of the code base.
* Introduce a StashBoxError in configuration
Since all stashbox errors are the same, treat them as such in the code
base. While here, rewrite an ifElse chain.
In the future, it might be beneifical to refactor configuration errors
into one error which can handle missing fields, which context the error
occurs in and so on. But for now, try to get an overview of the error
categories by hoisting them into static errors.
* Get rid of an else-block in transaction handling
If we succesfully `recover()`, we then always `panic()`. This means the
rest of the code is not reachable, so we can avoid having an else-block
here.
It also solves an ifElse-chain style check in the code base.
* Use strings.ReplaceAll
Rewrite
strings.Replace(s, o, n, -1)
into
strings.ReplaceAll(s, o, n)
To make it consistent and clear that we are doing an all-replace in the
string rather than replacing parts of it. It's more of a nitpick since
there are no implementation differences: the stdlib implementation is
just to supply -1.
* Rewrite via gocritic's assignOp
Statements of the form
x = x + e
is rewritten into
x += e
where applicable.
* Formatting
* Review comments handled
Stash-box is a proper noun.
Rewrite a switch into an if-chain which returns on the first error
encountered.
* Use context.TODO() over context.Background()
Patch in the same vein as everything else: use the TODO() marker so we
can search for it later and link it into the context tree/tentacle once
it reaches down to this level in the code base.
* Tell the linter to ignore a section in manager_tasks.go
The section is less readable, so mark it with a nolint for now. Because
the rewrite enables a ifElseChain, also mark that as nolint for now.
* Use strings.ReplaceAll over strings.Replace
* Apply an ifElse rewrite
else { if .. { .. } } rewrite into else if { .. }
* Use switch-statements over ifElseChains
Rewrite chains of if-else into switch statements. Where applicable,
add an early nil-guard to simplify case analysis. Also, in
ScanTask's Start(..), invert the logic to outdent the whole block, and
help the reader: if it's not a scene, the function flow is now far more
local to the top of the function, and it's clear that the rest of the
function has to do with scene management.
* Enable gocritic on the code base.
Disable appendAssign for now since we aren't passing that check yet.
* Document the nolint additions
* Document StashBoxBatchPerformerTagInput
2021-10-18 03:12:40 +00:00
|
|
|
case models.CriterionModifierLessThan:
|
2021-08-02 03:22:39 +00:00
|
|
|
f.addWhere(fmt.Sprintf("%s < %d", widthHeight, min))
|
Enable gocritic (#1848)
* Don't capitalize local variables
ValidCodecs -> validCodecs
* Capitalize deprecation markers
A deprecated marker should be capitalized.
* Use re.MustCompile for static regexes
If the regex fails to compile, it's a programmer error, and should be
treated as such. The regex is entirely static.
* Simplify else-if constructions
Rewrite
else { if cond {}}
to
else if cond {}
* Use a switch statement to analyze formats
Break an if-else chain. While here, simplify code flow.
Also introduce a proper static error for unsupported image formats,
paving the way for being able to check against the error.
* Rewrite ifElse chains into switch statements
The "Effective Go" https://golang.org/doc/effective_go#switch document
mentions it is more idiomatic to write if-else chains as switches when
it is possible.
Find all the plain rewrite occurrences in the code base and rewrite.
In some cases, the if-else chains are replaced by a switch scrutinizer.
That is, the code sequence
if x == 1 {
..
} else if x == 2 {
..
} else if x == 3 {
...
}
can be rewritten into
switch x {
case 1:
..
case 2:
..
case 3:
..
}
which is clearer for the compiler: it can decide if the switch is
better served by a jump-table then a branch-chain.
* Rewrite switches, introduce static errors
Introduce two new static errors:
* `ErrNotImplmented`
* `ErrNotSupported`
And use these rather than forming new generative errors whenever the
code is called. Code can now test on the errors (since they are static
and the pointers to them wont change).
Also rewrite ifElse chains into switches in this part of the code base.
* Introduce a StashBoxError in configuration
Since all stashbox errors are the same, treat them as such in the code
base. While here, rewrite an ifElse chain.
In the future, it might be beneifical to refactor configuration errors
into one error which can handle missing fields, which context the error
occurs in and so on. But for now, try to get an overview of the error
categories by hoisting them into static errors.
* Get rid of an else-block in transaction handling
If we succesfully `recover()`, we then always `panic()`. This means the
rest of the code is not reachable, so we can avoid having an else-block
here.
It also solves an ifElse-chain style check in the code base.
* Use strings.ReplaceAll
Rewrite
strings.Replace(s, o, n, -1)
into
strings.ReplaceAll(s, o, n)
To make it consistent and clear that we are doing an all-replace in the
string rather than replacing parts of it. It's more of a nitpick since
there are no implementation differences: the stdlib implementation is
just to supply -1.
* Rewrite via gocritic's assignOp
Statements of the form
x = x + e
is rewritten into
x += e
where applicable.
* Formatting
* Review comments handled
Stash-box is a proper noun.
Rewrite a switch into an if-chain which returns on the first error
encountered.
* Use context.TODO() over context.Background()
Patch in the same vein as everything else: use the TODO() marker so we
can search for it later and link it into the context tree/tentacle once
it reaches down to this level in the code base.
* Tell the linter to ignore a section in manager_tasks.go
The section is less readable, so mark it with a nolint for now. Because
the rewrite enables a ifElseChain, also mark that as nolint for now.
* Use strings.ReplaceAll over strings.Replace
* Apply an ifElse rewrite
else { if .. { .. } } rewrite into else if { .. }
* Use switch-statements over ifElseChains
Rewrite chains of if-else into switch statements. Where applicable,
add an early nil-guard to simplify case analysis. Also, in
ScanTask's Start(..), invert the logic to outdent the whole block, and
help the reader: if it's not a scene, the function flow is now far more
local to the top of the function, and it's clear that the rest of the
function has to do with scene management.
* Enable gocritic on the code base.
Disable appendAssign for now since we aren't passing that check yet.
* Document the nolint additions
* Document StashBoxBatchPerformerTagInput
2021-10-18 03:12:40 +00:00
|
|
|
case models.CriterionModifierGreaterThan:
|
2021-08-02 03:22:39 +00:00
|
|
|
f.addWhere(fmt.Sprintf("%s > %d", widthHeight, max))
|
2021-03-02 00:27:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func hasMarkersCriterionHandler(hasMarkers *string) criterionHandlerFunc {
|
|
|
|
return func(f *filterBuilder) {
|
|
|
|
if hasMarkers != nil {
|
|
|
|
f.addJoin("scene_markers", "", "scene_markers.scene_id = scenes.id")
|
|
|
|
if *hasMarkers == "true" {
|
|
|
|
f.addHaving("count(scene_markers.scene_id) > 0")
|
|
|
|
} else {
|
|
|
|
f.addWhere("scene_markers.id IS NULL")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func sceneIsMissingCriterionHandler(qb *sceneQueryBuilder, isMissing *string) criterionHandlerFunc {
|
|
|
|
return func(f *filterBuilder) {
|
|
|
|
if isMissing != nil && *isMissing != "" {
|
|
|
|
switch *isMissing {
|
|
|
|
case "galleries":
|
|
|
|
qb.galleriesRepository().join(f, "galleries_join", "scenes.id")
|
|
|
|
f.addWhere("galleries_join.scene_id IS NULL")
|
|
|
|
case "studio":
|
|
|
|
f.addWhere("scenes.studio_id IS NULL")
|
|
|
|
case "movie":
|
|
|
|
qb.moviesRepository().join(f, "movies_join", "scenes.id")
|
|
|
|
f.addWhere("movies_join.scene_id IS NULL")
|
|
|
|
case "performers":
|
|
|
|
qb.performersRepository().join(f, "performers_join", "scenes.id")
|
|
|
|
f.addWhere("performers_join.scene_id IS NULL")
|
|
|
|
case "date":
|
|
|
|
f.addWhere("scenes.date IS \"\" OR scenes.date IS \"0001-01-01\"")
|
|
|
|
case "tags":
|
|
|
|
qb.tagsRepository().join(f, "tags_join", "scenes.id")
|
|
|
|
f.addWhere("tags_join.scene_id IS NULL")
|
2021-06-21 05:48:28 +00:00
|
|
|
case "stash_id":
|
|
|
|
qb.stashIDRepository().join(f, "scene_stash_ids", "scenes.id")
|
|
|
|
f.addWhere("scene_stash_ids.scene_id IS NULL")
|
2021-03-02 00:27:36 +00:00
|
|
|
default:
|
|
|
|
f.addWhere("(scenes." + *isMissing + " IS NULL OR TRIM(scenes." + *isMissing + ") = '')")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) getMultiCriterionHandlerBuilder(foreignTable, joinTable, foreignFK string, addJoinsFunc func(f *filterBuilder)) multiCriterionHandlerBuilder {
|
|
|
|
return multiCriterionHandlerBuilder{
|
|
|
|
primaryTable: sceneTable,
|
|
|
|
foreignTable: foreignTable,
|
|
|
|
joinTable: joinTable,
|
|
|
|
primaryFK: sceneIDColumn,
|
|
|
|
foreignFK: foreignFK,
|
|
|
|
addJoinsFunc: addJoinsFunc,
|
|
|
|
}
|
|
|
|
}
|
2021-04-09 08:46:00 +00:00
|
|
|
|
Tag hierarchy (#1519)
* Add migration script for tag relations table
* Expand hierarchical filter features
Expand the features of the hierarchical multi input filter with support
for using a relations table, which only has parent_id and child_id
columns, and support adding an additional intermediate table to join on,
for example for scenes and tags which are linked by the scenes_tags
table as well.
* Add hierarchical filtering for tags
* Add hierarchical tags support to scene markers
Refactor filtering of scene markers to filterBuilder and in the process
add support for hierarchical tags as well.
* List parent and child tags on tag details page
* Support setting parent and child tags
Add support for setting parent and child tags during tag creation and
tag updates.
* Validate no loops are created in tags hierarchy
* Update tag merging to support tag hierarchy
* Add unit tests for tags.EnsureUniqueHierarchy
* Fix applying recursive to with clause
The SQL `RECURSIVE` of a `WITH` clause only needs to be applied once,
immediately after the `WITH`. So this fixes the query building to do just
that, automatically applying the `RECURSIVE` keyword when any added with
clause is added as recursive.
* Rename hierarchical root id column
* Rewrite hierarchical filtering for performance
Completely rewrite the hierarchical filtering to optimize for
performance. Doing the recursive query in combination with a complex
query seems to break SQLite optimizing some things which means that the
recursive part might be 2,5 second slower than adding a static
`VALUES()` list. This is mostly noticeable in case of the tag hierarchy
where setting an exclusion with any depth (or depth: all) being applied
has this performance impact of 2,5 second. "Include" also suffered this
issue, but some rewritten query by joining in the *_tags table in one
pass and applying a `WHERE x IS NOT NULL` filter did seem to optimize
that case. But that optimization isn't applied to the `IS NULL` filter
of "exclude". Running a simple query beforehand to get all (recursive)
items and then applying them to the query doesn't have this performance
penalty.
* Remove UI references to child studios and tags
* Add parents to tag export
* Support importing of parent relationship for tags
* Assign stable ids to parent / child badges
* Silence Apollo warning on parents/children fields on tags
Silence warning triggered by Apollo GraphQL by explicitly instructing it
to use the incoming parents/children values. By default it already does
this, but it triggers a warning as it might be unintended that it uses
the incoming values (instead of for example merging both arrays).
Setting merge to false still applies the same behaviour (use only
incoming values) but silences the warning as it's explicitly configured
to work like this.
* Rework detecting unique tag hierarchy
Completely rework the unique tag hierarchy to detect invalid hierarchies
for which a tag is "added in the middle". So when there are tags A <- B
and A <- C, you could previously edit tag B and add tag C as a sub tag
without it being noticed as parent A being applied twice (to tag C).
While afterwards saving tag C would fail as tag A was applied as parent
twice. The updated code correctly detects this scenario as well.
Furthermore the error messaging has been reworked a bit and the message
now mentions both the direct parent / sub tag as well as the tag which
would results in the error. So in aboves example it would now show the
message that tag C can't be applied because tag A already is a parent.
* Update relations on cached tags when needed
Update the relations on cached tags when a tag is created / updated /
deleted so these always reflect the correct state. Otherwise (re)opening
a tag might still show the old relations until the page is fully
reloaded or the list is navigated. But this obviously is strange when
you for example have tag A, create or update tag B to have a relation to
tag A, and from tags B page click through to tag A and it doesn't show
that it is linked to tag B.
2021-09-09 04:58:43 +00:00
|
|
|
func sceneTagsCriterionHandler(qb *sceneQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc {
|
|
|
|
h := joinedHierarchicalMultiCriterionHandlerBuilder{
|
|
|
|
tx: qb.tx,
|
|
|
|
|
|
|
|
primaryTable: sceneTable,
|
|
|
|
foreignTable: tagTable,
|
|
|
|
foreignFK: "tag_id",
|
|
|
|
|
|
|
|
relationsTable: "tags_relations",
|
|
|
|
joinAs: "scene_tag",
|
|
|
|
joinTable: scenesTagsTable,
|
|
|
|
primaryFK: sceneIDColumn,
|
2021-03-02 00:27:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return h.handler(tags)
|
|
|
|
}
|
|
|
|
|
2021-04-09 08:46:00 +00:00
|
|
|
func sceneTagCountCriterionHandler(qb *sceneQueryBuilder, tagCount *models.IntCriterionInput) criterionHandlerFunc {
|
|
|
|
h := countCriterionHandlerBuilder{
|
|
|
|
primaryTable: sceneTable,
|
|
|
|
joinTable: scenesTagsTable,
|
|
|
|
primaryFK: sceneIDColumn,
|
|
|
|
}
|
|
|
|
|
|
|
|
return h.handler(tagCount)
|
|
|
|
}
|
|
|
|
|
2021-03-02 00:27:36 +00:00
|
|
|
func scenePerformersCriterionHandler(qb *sceneQueryBuilder, performers *models.MultiCriterionInput) criterionHandlerFunc {
|
2021-05-09 09:25:57 +00:00
|
|
|
h := joinedMultiCriterionHandlerBuilder{
|
|
|
|
primaryTable: sceneTable,
|
|
|
|
joinTable: performersScenesTable,
|
|
|
|
joinAs: "performers_join",
|
|
|
|
primaryFK: sceneIDColumn,
|
|
|
|
foreignFK: performerIDColumn,
|
|
|
|
|
|
|
|
addJoinTable: func(f *filterBuilder) {
|
|
|
|
qb.performersRepository().join(f, "performers_join", "scenes.id")
|
|
|
|
},
|
2021-03-02 00:27:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return h.handler(performers)
|
|
|
|
}
|
|
|
|
|
2021-04-09 08:46:00 +00:00
|
|
|
func scenePerformerCountCriterionHandler(qb *sceneQueryBuilder, performerCount *models.IntCriterionInput) criterionHandlerFunc {
|
|
|
|
h := countCriterionHandlerBuilder{
|
|
|
|
primaryTable: sceneTable,
|
|
|
|
joinTable: performersScenesTable,
|
|
|
|
primaryFK: sceneIDColumn,
|
|
|
|
}
|
|
|
|
|
|
|
|
return h.handler(performerCount)
|
|
|
|
}
|
|
|
|
|
2021-06-03 10:52:19 +00:00
|
|
|
func sceneStudioCriterionHandler(qb *sceneQueryBuilder, studios *models.HierarchicalMultiCriterionInput) criterionHandlerFunc {
|
|
|
|
h := hierarchicalMultiCriterionHandlerBuilder{
|
Tag hierarchy (#1519)
* Add migration script for tag relations table
* Expand hierarchical filter features
Expand the features of the hierarchical multi input filter with support
for using a relations table, which only has parent_id and child_id
columns, and support adding an additional intermediate table to join on,
for example for scenes and tags which are linked by the scenes_tags
table as well.
* Add hierarchical filtering for tags
* Add hierarchical tags support to scene markers
Refactor filtering of scene markers to filterBuilder and in the process
add support for hierarchical tags as well.
* List parent and child tags on tag details page
* Support setting parent and child tags
Add support for setting parent and child tags during tag creation and
tag updates.
* Validate no loops are created in tags hierarchy
* Update tag merging to support tag hierarcy
* Add unit tests for tags.EnsureUniqueHierarchy
* Fix applying recursive to with clause
The SQL `RECURSIVE` of a `WITH` clause only needs to be applied once,
imediately after the `WITH`. So this fixes the query building to do just
that, automatically applying the `RECURSIVE` keyword when any added with
clause is added as recursive.
* Rename hierarchical root id column
* Rewrite hierarchical filtering for performance
Completely rewrite the hierarchical filtering to optimize for
performance. Doing the recursive query in combination with a complex
query seems to break SQLite optimizing some things which means that the
recursive part might be 2,5 second slower than adding a static
`VALUES()` list. This is mostly noticable in case of the tag hierarchy
where setting an exclusion with any depth (or depth: all) being applied
has this performance impact of 2,5 second. "Include" also suffered this
issue, but some rewritten query by joining in the *_tags table in one
pass and applying a `WHERE x IS NOT NULL` filter did seem to optimize
that case. But that optimization isn't applied to the `IS NULL` filter
of "exclude". Running a simple query beforehand to get all (recursive)
items and then applying them to the query doesn't have this performance
penalty.
* Remove UI references to child studios and tags
* Add parents to tag export
* Support importing of parent relationship for tags
* Assign stable ids to parent / child badges
* Silence Apollo warning on parents/children fields on tags
Silence warning triggered by Apollo GraphQL by explicitly instructing it
to use the incoming parents/children values. By default it already does
this, but it triggers a warning as it might be unintended that it uses
the incoming values (instead of for example merging both arrays).
Setting merge to false still applies the same behaviour (use only
incoming values) but silences the warning as it's explicitly configured
to work like this.
* Rework detecting unique tag hierarchy
Completely rework the unique tag hierarchy to detect invalid hierarchies
for which a tag is "added in the middle". So when there are tags A <- B
and A <- C, you could previously edit tag B and add tag C as a sub tag
without it being noticed as parent A being applied twice (to tag C).
While afterwards saving tag C would fail as tag A was applied as parent
twice. The updated code correctly detects this scenario as well.
Furthermore the error messaging has been reworked a bit and the message
now mentions both the direct parent / sub tag as well as the tag which
would results in the error. So in aboves example it would now show the
message that tag C can't be applied because tag A already is a parent.
* Update relations on cached tags when needed
Update the relations on cached tags when a tag is created / updated /
deleted so these always reflect the correct state. Otherwise (re)opening
a tag might still show the old relations untill the page is fully
reloaded or the list is navigated. But this obviously is strange when
you for example have tag A, create or update tag B to have a relation to
tag A, and from tags B page click through to tag A and it doesn't show
that it is linked to tag B.
2021-09-09 04:58:43 +00:00
|
|
|
tx: qb.tx,
|
|
|
|
|
2021-06-03 10:52:19 +00:00
|
|
|
primaryTable: sceneTable,
|
|
|
|
foreignTable: studioTable,
|
|
|
|
foreignFK: studioIDColumn,
|
|
|
|
derivedTable: "studio",
|
|
|
|
parentFK: "parent_id",
|
2021-03-02 00:27:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return h.handler(studios)
|
|
|
|
}
|
|
|
|
|
|
|
|
func sceneMoviesCriterionHandler(qb *sceneQueryBuilder, movies *models.MultiCriterionInput) criterionHandlerFunc {
|
|
|
|
addJoinsFunc := func(f *filterBuilder) {
|
|
|
|
qb.moviesRepository().join(f, "movies_join", "scenes.id")
|
|
|
|
f.addJoin("movies", "", "movies_join.movie_id = movies.id")
|
|
|
|
}
|
|
|
|
h := qb.getMultiCriterionHandlerBuilder(movieTable, moviesScenesTable, "movie_id", addJoinsFunc)
|
|
|
|
return h.handler(movies)
|
|
|
|
}
|
|
|
|
|
Tag hierarchy (#1519)
* Add migration script for tag relations table
* Expand hierarchical filter features
Expand the features of the hierarchical multi input filter with support
for using a relations table, which only has parent_id and child_id
columns, and support adding an additional intermediate table to join on,
for example for scenes and tags which are linked by the scenes_tags
table as well.
* Add hierarchical filtering for tags
* Add hierarchical tags support to scene markers
Refactor filtering of scene markers to filterBuilder and in the process
add support for hierarchical tags as well.
* List parent and child tags on tag details page
* Support setting parent and child tags
Add support for setting parent and child tags during tag creation and
tag updates.
* Validate no loops are created in tags hierarchy
* Update tag merging to support tag hierarcy
* Add unit tests for tags.EnsureUniqueHierarchy
* Fix applying recursive to with clause
The SQL `RECURSIVE` of a `WITH` clause only needs to be applied once,
imediately after the `WITH`. So this fixes the query building to do just
that, automatically applying the `RECURSIVE` keyword when any added with
clause is added as recursive.
* Rename hierarchical root id column
* Rewrite hierarchical filtering for performance
Completely rewrite the hierarchical filtering to optimize for
performance. Doing the recursive query in combination with a complex
query seems to break SQLite optimizing some things which means that the
recursive part might be 2,5 second slower than adding a static
`VALUES()` list. This is mostly noticable in case of the tag hierarchy
where setting an exclusion with any depth (or depth: all) being applied
has this performance impact of 2,5 second. "Include" also suffered this
issue, but some rewritten query by joining in the *_tags table in one
pass and applying a `WHERE x IS NOT NULL` filter did seem to optimize
that case. But that optimization isn't applied to the `IS NULL` filter
of "exclude". Running a simple query beforehand to get all (recursive)
items and then applying them to the query doesn't have this performance
penalty.
* Remove UI references to child studios and tags
* Add parents to tag export
* Support importing of parent relationship for tags
* Assign stable ids to parent / child badges
* Silence Apollo warning on parents/children fields on tags
Silence warning triggered by Apollo GraphQL by explicitly instructing it
to use the incoming parents/children values. By default it already does
this, but it triggers a warning as it might be unintended that it uses
the incoming values (instead of for example merging both arrays).
Setting merge to false still applies the same behaviour (use only
incoming values) but silences the warning as it's explicitly configured
to work like this.
* Rework detecting unique tag hierarchy
Completely rework the unique tag hierarchy to detect invalid hierarchies
for which a tag is "added in the middle". So when there are tags A <- B
and A <- C, you could previously edit tag B and add tag C as a sub tag
without it being noticed as parent A being applied twice (to tag C).
While afterwards saving tag C would fail as tag A was applied as parent
twice. The updated code correctly detects this scenario as well.
Furthermore the error messaging has been reworked a bit and the message
now mentions both the direct parent / sub tag as well as the tag which
would results in the error. So in aboves example it would now show the
message that tag C can't be applied because tag A already is a parent.
* Update relations on cached tags when needed
Update the relations on cached tags when a tag is created / updated /
deleted so these always reflect the correct state. Otherwise (re)opening
a tag might still show the old relations untill the page is fully
reloaded or the list is navigated. But this obviously is strange when
you for example have tag A, create or update tag B to have a relation to
tag A, and from tags B page click through to tag A and it doesn't show
that it is linked to tag B.
2021-09-09 04:58:43 +00:00
|
|
|
// scenePerformerTagsCriterionHandler filters scenes by the tags attached to
// their performers, honouring the hierarchical depth of the criterion
// (tags.Depth). A nil criterion or an empty value list applies no filtering.
func scenePerformerTagsCriterionHandler(qb *sceneQueryBuilder, tags *models.HierarchicalMultiCriterionInput) criterionHandlerFunc {
	return func(f *filterBuilder) {
		if tags != nil && len(tags.Value) > 0 {
			// Resolve the selected tag ids (plus related tags up to
			// tags.Depth via the tags_relations table) into a static
			// VALUES() clause up front; see the hierarchical-filter
			// rework notes — inlining the recursive query here defeated
			// SQLite's optimizer.
			valuesClause := getHierarchicalValues(qb.tx, tags.Value, tagTable, "tags_relations", "", tags.Depth)

			// CTE mapping each scene id to the root tag ids reachable
			// through its performers' tags. column1/column2 are the
			// positional column names SQLite assigns to the VALUES() rows.
			f.addWith(`performer_tags AS (
SELECT ps.scene_id, t.column1 AS root_tag_id FROM performers_scenes ps
INNER JOIN performers_tags pt ON pt.performer_id = ps.performer_id
INNER JOIN (` + valuesClause + `) t ON t.column2 = pt.tag_id
)`)

			f.addJoin("performer_tags", "", "performer_tags.scene_id = scenes.id")

			// Apply the criterion's modifier (includes/excludes/...)
			// against the CTE's root_tag_id column.
			addHierarchicalConditionClauses(f, tags, "performer_tags", "root_tag_id")
		}
	}
}
|
|
|
|
|
2021-04-09 08:46:00 +00:00
|
|
|
func (qb *sceneQueryBuilder) getDefaultSceneSort() string {
|
|
|
|
return " ORDER BY scenes.path, scenes.date ASC "
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) setSceneSort(query *queryBuilder, findFilter *models.FindFilterType) {
|
2019-02-09 12:30:49 +00:00
|
|
|
if findFilter == nil {
|
2021-04-09 08:46:00 +00:00
|
|
|
query.sortAndPagination += qb.getDefaultSceneSort()
|
|
|
|
return
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
2019-02-14 22:53:32 +00:00
|
|
|
sort := findFilter.GetSort("title")
|
|
|
|
direction := findFilter.GetDirection()
|
2021-04-09 08:46:00 +00:00
|
|
|
switch sort {
|
2021-04-22 02:22:51 +00:00
|
|
|
case "movie_scene_number":
|
2021-05-27 23:01:03 +00:00
|
|
|
query.join(moviesScenesTable, "movies_join", "scenes.id = movies_join.scene_id")
|
2021-04-22 02:22:51 +00:00
|
|
|
query.sortAndPagination += fmt.Sprintf(" ORDER BY movies_join.scene_index %s", getSortDirection(direction))
|
2021-04-09 08:46:00 +00:00
|
|
|
case "tag_count":
|
|
|
|
query.sortAndPagination += getCountSort(sceneTable, scenesTagsTable, sceneIDColumn, direction)
|
|
|
|
case "performer_count":
|
|
|
|
query.sortAndPagination += getCountSort(sceneTable, performersScenesTable, sceneIDColumn, direction)
|
|
|
|
default:
|
|
|
|
query.sortAndPagination += getSort(sort, direction, "scenes")
|
|
|
|
}
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) queryScene(query string, args []interface{}) (*models.Scene, error) {
|
|
|
|
results, err := qb.queryScenes(query, args)
|
2019-02-09 12:30:49 +00:00
|
|
|
if err != nil || len(results) < 1 {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-05-27 19:34:26 +00:00
|
|
|
return results[0], nil
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) queryScenes(query string, args []interface{}) ([]*models.Scene, error) {
|
|
|
|
var ret models.Scenes
|
|
|
|
if err := qb.query(query, args, &ret); err != nil {
|
|
|
|
return nil, err
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
return []*models.Scene(ret), nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) imageRepository() *imageRepository {
|
|
|
|
return &imageRepository{
|
|
|
|
repository: repository{
|
|
|
|
tx: qb.tx,
|
|
|
|
tableName: "scenes_cover",
|
|
|
|
idColumn: sceneIDColumn,
|
|
|
|
},
|
|
|
|
imageColumn: "cover",
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
2021-01-18 01:23:20 +00:00
|
|
|
}
|
2019-02-09 12:30:49 +00:00
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) GetCover(sceneID int) ([]byte, error) {
|
|
|
|
return qb.imageRepository().get(sceneID)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) UpdateCover(sceneID int, image []byte) error {
|
|
|
|
return qb.imageRepository().replace(sceneID, image)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) DestroyCover(sceneID int) error {
|
|
|
|
return qb.imageRepository().destroy([]int{sceneID})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) moviesRepository() *repository {
|
|
|
|
return &repository{
|
|
|
|
tx: qb.tx,
|
|
|
|
tableName: moviesScenesTable,
|
|
|
|
idColumn: sceneIDColumn,
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
2021-01-18 01:23:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) GetMovies(id int) (ret []models.MoviesScenes, err error) {
|
|
|
|
if err := qb.moviesRepository().getAll(id, func(rows *sqlx.Rows) error {
|
|
|
|
var ms models.MoviesScenes
|
|
|
|
if err := rows.StructScan(&ms); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-02-09 12:30:49 +00:00
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
ret = append(ret, ms)
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
2019-02-09 12:30:49 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
return ret, nil
|
2019-02-09 12:30:49 +00:00
|
|
|
}
|
2020-04-09 22:38:34 +00:00
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) UpdateMovies(sceneID int, movies []models.MoviesScenes) error {
|
|
|
|
// destroy existing joins
|
|
|
|
r := qb.moviesRepository()
|
|
|
|
if err := r.destroy([]int{sceneID}); err != nil {
|
2020-04-09 22:38:34 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
for _, m := range movies {
|
|
|
|
m.SceneID = sceneID
|
|
|
|
if _, err := r.insert(m); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-08-06 01:21:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) performersRepository() *joinRepository {
|
|
|
|
return &joinRepository{
|
|
|
|
repository: repository{
|
|
|
|
tx: qb.tx,
|
|
|
|
tableName: performersScenesTable,
|
|
|
|
idColumn: sceneIDColumn,
|
|
|
|
},
|
|
|
|
fkColumn: performerIDColumn,
|
2020-08-06 01:21:14 +00:00
|
|
|
}
|
2021-01-18 01:23:20 +00:00
|
|
|
}
|
2020-08-06 01:21:14 +00:00
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) GetPerformerIDs(id int) ([]int, error) {
|
|
|
|
return qb.performersRepository().getIDs(id)
|
2020-08-06 01:21:14 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) UpdatePerformers(id int, performerIDs []int) error {
|
|
|
|
// Delete the existing joins and then create new ones
|
|
|
|
return qb.performersRepository().replace(id, performerIDs)
|
|
|
|
}
|
2020-06-22 23:19:19 +00:00
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) tagsRepository() *joinRepository {
|
|
|
|
return &joinRepository{
|
|
|
|
repository: repository{
|
|
|
|
tx: qb.tx,
|
|
|
|
tableName: scenesTagsTable,
|
|
|
|
idColumn: sceneIDColumn,
|
|
|
|
},
|
|
|
|
fkColumn: tagIDColumn,
|
2020-06-22 23:19:19 +00:00
|
|
|
}
|
2021-01-18 01:23:20 +00:00
|
|
|
}
|
2020-06-22 23:19:19 +00:00
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) GetTagIDs(id int) ([]int, error) {
|
|
|
|
return qb.tagsRepository().getIDs(id)
|
2020-06-22 23:19:19 +00:00
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) UpdateTags(id int, tagIDs []int) error {
|
|
|
|
// Delete the existing joins and then create new ones
|
|
|
|
return qb.tagsRepository().replace(id, tagIDs)
|
|
|
|
}
|
2020-06-22 23:19:19 +00:00
|
|
|
|
2021-02-01 20:56:54 +00:00
|
|
|
func (qb *sceneQueryBuilder) galleriesRepository() *joinRepository {
|
|
|
|
return &joinRepository{
|
|
|
|
repository: repository{
|
|
|
|
tx: qb.tx,
|
|
|
|
tableName: scenesGalleriesTable,
|
|
|
|
idColumn: sceneIDColumn,
|
|
|
|
},
|
|
|
|
fkColumn: galleryIDColumn,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) GetGalleryIDs(id int) ([]int, error) {
|
|
|
|
return qb.galleriesRepository().getIDs(id)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) UpdateGalleries(id int, galleryIDs []int) error {
|
|
|
|
// Delete the existing joins and then create new ones
|
|
|
|
return qb.galleriesRepository().replace(id, galleryIDs)
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) stashIDRepository() *stashIDRepository {
|
|
|
|
return &stashIDRepository{
|
|
|
|
repository{
|
|
|
|
tx: qb.tx,
|
|
|
|
tableName: "scene_stash_ids",
|
|
|
|
idColumn: sceneIDColumn,
|
|
|
|
},
|
2020-06-22 23:19:19 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-01-18 01:23:20 +00:00
|
|
|
func (qb *sceneQueryBuilder) GetStashIDs(sceneID int) ([]*models.StashID, error) {
|
|
|
|
return qb.stashIDRepository().get(sceneID)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) UpdateStashIDs(sceneID int, stashIDs []models.StashID) error {
|
|
|
|
return qb.stashIDRepository().replace(sceneID, stashIDs)
|
2020-06-22 23:19:19 +00:00
|
|
|
}
|
2021-04-11 23:04:40 +00:00
|
|
|
|
|
|
|
func (qb *sceneQueryBuilder) FindDuplicates(distance int) ([][]*models.Scene, error) {
|
|
|
|
var dupeIds [][]int
|
|
|
|
if distance == 0 {
|
|
|
|
var ids []string
|
|
|
|
if err := qb.tx.Select(&ids, findExactDuplicateQuery); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, id := range ids {
|
|
|
|
strIds := strings.Split(id, ",")
|
|
|
|
var sceneIds []int
|
|
|
|
for _, strId := range strIds {
|
|
|
|
if intId, err := strconv.Atoi(strId); err == nil {
|
|
|
|
sceneIds = append(sceneIds, intId)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
dupeIds = append(dupeIds, sceneIds)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
var hashes []*utils.Phash
|
|
|
|
|
2021-10-25 00:40:13 +00:00
|
|
|
if err := qb.queryFunc(findAllPhashesQuery, nil, false, func(rows *sqlx.Rows) error {
|
2021-04-11 23:04:40 +00:00
|
|
|
phash := utils.Phash{
|
|
|
|
Bucket: -1,
|
|
|
|
}
|
|
|
|
if err := rows.StructScan(&phash); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
hashes = append(hashes, &phash)
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
dupeIds = utils.FindDuplicates(hashes, distance)
|
|
|
|
}
|
|
|
|
|
|
|
|
var duplicates [][]*models.Scene
|
|
|
|
for _, sceneIds := range dupeIds {
|
|
|
|
if scenes, err := qb.FindMany(sceneIds); err == nil {
|
|
|
|
duplicates = append(duplicates, scenes)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return duplicates, nil
|
|
|
|
}
|