stash/pkg/sqlite/repository.go

package sqlite

import (
"database/sql"
"errors"
"fmt"
"reflect"
"strings"
"github.com/jmoiron/sqlx"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/models"
)
const idColumn = "id"
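// objectList is implemented by slice wrappers that can allocate a new
// element and append a scanned row, allowing query to populate them
// generically (see stashIDs below for an example implementation).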
type objectList interface {
Append(o interface{})
New() interface{}
}
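// repository provides generic CRUD helpers for a single table, identified
// by tableName and its primary key column (idColumn), executed against the
// transaction/database handle in tx.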
type repository struct {
tx dbi
tableName string
idColumn string
}
func (r *repository) get(id int, dest interface{}) error {
stmt := fmt.Sprintf("SELECT * FROM %s WHERE %s = ? LIMIT 1", r.tableName, r.idColumn)
return r.tx.Get(dest, stmt, id)
}
func (r *repository) getAll(id int, f func(rows *sqlx.Rows) error) error {
stmt := fmt.Sprintf("SELECT * FROM %s WHERE %s = ?", r.tableName, r.idColumn)
return r.queryFunc(stmt, []interface{}{id}, false, f)
}
func (r *repository) insert(obj interface{}) (sql.Result, error) {
stmt := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", r.tableName, listKeys(obj, false), listKeys(obj, true))
return r.tx.NamedExec(stmt, obj)
}
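// insertObject inserts obj and re-reads the newly created row, by its
// auto-generated id, into out.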
func (r *repository) insertObject(obj interface{}, out interface{}) error {
result, err := r.insert(obj)
if err != nil {
return err
}
id, err := result.LastInsertId()
if err != nil {
return err
}
return r.get(int(id), out)
}
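// update writes obj to the row with the given id, returning an error if the
// row does not exist. When partial is true, only non-nil db-tagged fields
// are included in the SET clause.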
func (r *repository) update(id int, obj interface{}, partial bool) error {
exists, err := r.exists(id)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s %d does not exist in %s", r.idColumn, id, r.tableName)
}
stmt := fmt.Sprintf("UPDATE %s SET %s WHERE %s.%s = :id", r.tableName, updateSet(obj, partial), r.tableName, r.idColumn)
_, err = r.tx.NamedExec(stmt, obj)
return err
}
func (r *repository) updateMap(id int, m map[string]interface{}) error {
exists, err := r.exists(id)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s %d does not exist in %s", r.idColumn, id, r.tableName)
}
stmt := fmt.Sprintf("UPDATE %s SET %s WHERE %s.%s = :id", r.tableName, updateSetMap(m), r.tableName, r.idColumn)
_, err = r.tx.NamedExec(stmt, m)
return err
}
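// destroyExisting verifies that every id exists before deleting; destroy
// deletes the rows without that check.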
func (r *repository) destroyExisting(ids []int) error {
for _, id := range ids {
exists, err := r.exists(id)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s %d does not exist in %s", r.idColumn, id, r.tableName)
}
}
return r.destroy(ids)
}
func (r *repository) destroy(ids []int) error {
for _, id := range ids {
stmt := fmt.Sprintf("DELETE FROM %s WHERE %s = ?", r.tableName, r.idColumn)
if _, err := r.tx.Exec(stmt, id); err != nil {
return err
}
}
return nil
}
func (r *repository) exists(id int) (bool, error) {
stmt := fmt.Sprintf("SELECT %s FROM %s WHERE %s = ? LIMIT 1", r.idColumn, r.tableName, r.idColumn)
stmt = r.buildCountQuery(stmt)
c, err := r.runCountQuery(stmt, []interface{}{id})
if err != nil {
return false, err
}
return c == 1, nil
}
func (r *repository) buildCountQuery(query string) string {
return "SELECT COUNT(*) as count FROM (" + query + ") as temp"
}
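// runCountQuery executes a query produced by buildCountQuery (selecting a
// single count column) and returns the count, treating sql.ErrNoRows as zero.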
func (r *repository) runCountQuery(query string, args []interface{}) (int, error) {
result := struct {
Int int `db:"count"`
}{0}
// Perform query and fetch result
if err := r.tx.Get(&result, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) {
return 0, err
}
return result.Int, nil
}
func (r *repository) runIdsQuery(query string, args []interface{}) ([]int, error) {
var result []struct {
Int int `db:"id"`
}
if err := r.tx.Select(&result, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) {
return []int{}, err
}
vsm := make([]int, len(result))
for i, v := range result {
vsm[i] = v.Int
}
return vsm, nil
}
func (r *repository) runSumQuery(query string, args []interface{}) (float64, error) {
// Perform query and fetch result
result := struct {
Float64 float64 `db:"sum"`
}{0}
if err := r.tx.Get(&result, query, args...); err != nil && !errors.Is(err, sql.ErrNoRows) {
return 0, err
}
return result.Float64, nil
}
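// queryFunc runs the query and invokes f for each returned row. When single
// is true it stops after the first row. The rows iteration error is checked
// after the loop.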
func (r *repository) queryFunc(query string, args []interface{}, single bool, f func(rows *sqlx.Rows) error) error {
logger.Tracef("SQL: %s, args: %v", query, args)
rows, err := r.tx.Queryx(query, args...)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return err
}
defer rows.Close()
for rows.Next() {
if err := f(rows); err != nil {
return err
}
if single {
break
}
}
if err := rows.Err(); err != nil {
return err
}
return nil
}
func (r *repository) query(query string, args []interface{}, out objectList) error {
return r.queryFunc(query, args, false, func(rows *sqlx.Rows) error {
object := out.New()
if err := rows.StructScan(object); err != nil {
return err
}
out.Append(object)
return nil
})
}
func (r *repository) queryStruct(query string, args []interface{}, out interface{}) error {
return r.queryFunc(query, args, true, func(rows *sqlx.Rows) error {
if err := rows.StructScan(out); err != nil {
return err
}
return nil
})
}
func (r *repository) querySimple(query string, args []interface{}, out interface{}) error {
rows, err := r.tx.Queryx(query, args...)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return err
}
defer rows.Close()
if rows.Next() {
if err := rows.Scan(out); err != nil {
return err
}
}
if err := rows.Err(); err != nil {
return err
}
return nil
}
func (r *repository) buildQueryBody(body string, whereClauses []string, havingClauses []string) string {
if len(whereClauses) > 0 {
body = body + " WHERE " + strings.Join(whereClauses, " AND ") // TODO handle AND or OR
}
if len(havingClauses) > 0 {
body = body + " GROUP BY " + r.tableName + ".id "
body = body + " HAVING " + strings.Join(havingClauses, " AND ") // TODO handle AND or OR
}
return body
}
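// executeFindQuery combines the query body with the where/having clauses and
// any (optionally recursive) WITH clauses, then runs both a count query and
// an ids query, returning the matching ids and the total count.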
func (r *repository) executeFindQuery(body string, args []interface{}, sortAndPagination string, whereClauses []string, havingClauses []string, withClauses []string, recursiveWith bool) ([]int, int, error) {
body = r.buildQueryBody(body, whereClauses, havingClauses)
withClause := ""
if len(withClauses) > 0 {
var recursive string
if recursiveWith {
recursive = " RECURSIVE "
}
withClause = "WITH " + recursive + strings.Join(withClauses, ", ") + " "
}
countQuery := withClause + r.buildCountQuery(body)
idsQuery := withClause + body + sortAndPagination
// Perform query and fetch result
logger.Tracef("SQL: %s, args: %v", idsQuery, args)
var countResult int
var countErr error
var idsResult []int
var idsErr error
countResult, countErr = r.runCountQuery(countQuery, args)
idsResult, idsErr = r.runIdsQuery(idsQuery, args)
if countErr != nil {
return nil, 0, fmt.Errorf("error executing count query with SQL: %s, args: %v, error: %s", countQuery, args, countErr.Error())
}
if idsErr != nil {
return nil, 0, fmt.Errorf("error executing find query with SQL: %s, args: %v, error: %s", idsQuery, args, idsErr.Error())
}
return idsResult, countResult, nil
}
func (r *repository) newQuery() queryBuilder {
return queryBuilder{
repository: r,
}
}
func (r *repository) join(j joiner, as string, parentIDCol string) {
t := r.tableName
if as != "" {
t = as
}
j.addLeftJoin(r.tableName, as, fmt.Sprintf("%s.%s = %s", t, r.idColumn, parentIDCol))
}
//nolint:golint,unused
func (r *repository) innerJoin(j joiner, as string, parentIDCol string) {
t := r.tableName
if as != "" {
t = as
}
j.addInnerJoin(r.tableName, as, fmt.Sprintf("%s.%s = %s", t, r.idColumn, parentIDCol))
}
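// joiner is implemented by query builders that can accumulate LEFT and INNER
// JOIN clauses; join and innerJoin above join this repository's table on its
// id column against parentIDCol.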
type joiner interface {
addLeftJoin(table, as, onClause string)
addInnerJoin(table, as, onClause string)
}
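// joinRepository manages a simple join table keyed by idColumn with a single
// foreign key column (fkColumn).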
type joinRepository struct {
repository
fkColumn string
}
func (r *joinRepository) getIDs(id int) ([]int, error) {
query := fmt.Sprintf(`SELECT %s as id from %s WHERE %s = ?`, r.fkColumn, r.tableName, r.idColumn)
return r.runIdsQuery(query, []interface{}{id})
}
func (r *joinRepository) insert(id, foreignID int) (sql.Result, error) {
stmt := fmt.Sprintf("INSERT INTO %s (%s, %s) VALUES (?, ?)", r.tableName, r.idColumn, r.fkColumn)
return r.tx.Exec(stmt, id, foreignID)
}
func (r *joinRepository) replace(id int, foreignIDs []int) error {
if err := r.destroy([]int{id}); err != nil {
return err
}
for _, fk := range foreignIDs {
if _, err := r.insert(id, fk); err != nil {
return err
}
}
return nil
}
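// imageRepository stores one image blob per object id in imageColumn.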
type imageRepository struct {
repository
imageColumn string
}
func (r *imageRepository) get(id int) ([]byte, error) {
query := fmt.Sprintf("SELECT %s from %s WHERE %s = ?", r.imageColumn, r.tableName, r.idColumn)
var ret []byte
err := r.querySimple(query, []interface{}{id}, &ret)
return ret, err
}
func (r *imageRepository) replace(id int, image []byte) error {
if err := r.destroy([]int{id}); err != nil {
return err
}
stmt := fmt.Sprintf("INSERT INTO %s (%s, %s) VALUES (?, ?)", r.tableName, r.idColumn, r.imageColumn)
_, err := r.tx.Exec(stmt, id, image)
return err
}
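// captionRepository stores scene captions (language code, filename and
// caption type) keyed by the owning scene id.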
type captionRepository struct {
repository
}
func (r *captionRepository) get(id int) ([]*models.SceneCaption, error) {
query := fmt.Sprintf("SELECT %s, %s, %s from %s WHERE %s = ?", sceneCaptionCodeColumn, sceneCaptionFilenameColumn, sceneCaptionTypeColumn, r.tableName, r.idColumn)
var ret []*models.SceneCaption
err := r.queryFunc(query, []interface{}{id}, false, func(rows *sqlx.Rows) error {
var captionCode string
var captionFilename string
var captionType string
if err := rows.Scan(&captionCode, &captionFilename, &captionType); err != nil {
return err
}
caption := &models.SceneCaption{
LanguageCode: captionCode,
Filename: captionFilename,
CaptionType: captionType,
}
ret = append(ret, caption)
return nil
})
return ret, err
}
func (r *captionRepository) insert(id int, caption *models.SceneCaption) (sql.Result, error) {
stmt := fmt.Sprintf("INSERT INTO %s (%s, %s, %s, %s) VALUES (?, ?, ?, ?)", r.tableName, r.idColumn, sceneCaptionCodeColumn, sceneCaptionFilenameColumn, sceneCaptionTypeColumn)
return r.tx.Exec(stmt, id, caption.LanguageCode, caption.Filename, caption.CaptionType)
}
func (r *captionRepository) replace(id int, captions []*models.SceneCaption) error {
if err := r.destroy([]int{id}); err != nil {
return err
}
for _, caption := range captions {
if _, err := r.insert(id, caption); err != nil {
return err
}
}
return nil
}
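// stringRepository stores a list of string values per object id in
// stringColumn.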
type stringRepository struct {
repository
stringColumn string
}
func (r *stringRepository) get(id int) ([]string, error) {
query := fmt.Sprintf("SELECT %s from %s WHERE %s = ?", r.stringColumn, r.tableName, r.idColumn)
var ret []string
err := r.queryFunc(query, []interface{}{id}, false, func(rows *sqlx.Rows) error {
var out string
if err := rows.Scan(&out); err != nil {
return err
}
ret = append(ret, out)
return nil
})
return ret, err
}
func (r *stringRepository) insert(id int, s string) (sql.Result, error) {
stmt := fmt.Sprintf("INSERT INTO %s (%s, %s) VALUES (?, ?)", r.tableName, r.idColumn, r.stringColumn)
return r.tx.Exec(stmt, id, s)
}
func (r *stringRepository) replace(id int, newStrings []string) error {
if err := r.destroy([]int{id}); err != nil {
return err
}
for _, s := range newStrings {
if _, err := r.insert(id, s); err != nil {
return err
}
}
return nil
}
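// stashIDRepository stores stash_id/endpoint pairs per object id.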
type stashIDRepository struct {
repository
}
type stashIDs []*models.StashID
func (s *stashIDs) Append(o interface{}) {
*s = append(*s, o.(*models.StashID))
}
func (s *stashIDs) New() interface{} {
return &models.StashID{}
}
func (r *stashIDRepository) get(id int) ([]*models.StashID, error) {
query := fmt.Sprintf("SELECT stash_id, endpoint from %s WHERE %s = ?", r.tableName, r.idColumn)
var ret stashIDs
err := r.query(query, []interface{}{id}, &ret)
return []*models.StashID(ret), err
}
func (r *stashIDRepository) replace(id int, newIDs []models.StashID) error {
if err := r.destroy([]int{id}); err != nil {
return err
}
query := fmt.Sprintf("INSERT INTO %s (%s, endpoint, stash_id) VALUES (?, ?, ?)", r.tableName, r.idColumn)
for _, stashID := range newIDs {
_, err := r.tx.Exec(query, id, stashID.Endpoint, stashID.StashID)
if err != nil {
return err
}
}
return nil
}
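// listKeys returns a comma-separated list of the db-tagged columns of i,
// excluding id. With addPrefix it returns the named-parameter form (":col")
// for use in the VALUES clause of an INSERT.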
func listKeys(i interface{}, addPrefix bool) string {
var query []string
v := reflect.ValueOf(i)
for i := 0; i < v.NumField(); i++ {
// Get key for struct tag
rawKey := v.Type().Field(i).Tag.Get("db")
key := strings.Split(rawKey, ",")[0]
if key == "id" {
continue
}
if addPrefix {
key = ":" + key
}
query = append(query, key)
}
return strings.Join(query, ", ")
}
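// updateSet builds the "col=:col" assignment list for an UPDATE from the
// db tags of i, excluding id. When partial is true, fields whose values are
// nil are skipped so only provided values are updated.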
func updateSet(i interface{}, partial bool) string {
var query []string
v := reflect.ValueOf(i)
for i := 0; i < v.NumField(); i++ {
// Get key for struct tag
rawKey := v.Type().Field(i).Tag.Get("db")
key := strings.Split(rawKey, ",")[0]
if key == "id" {
continue
}
add := true
if partial {
reflectValue := reflect.ValueOf(v.Field(i).Interface())
add = !reflectValue.IsNil()
}
if add {
query = append(query, fmt.Sprintf("%s=:%s", key, key))
}
}
return strings.Join(query, ", ")
}
func updateSetMap(m map[string]interface{}) string {
var query []string
for k := range m {
query = append(query, fmt.Sprintf("%s=:%s", k, k))
}
return strings.Join(query, ", ")
}