Lint checks phase 2 (#1747)
* Log 3 unchecked errors. Rather than ignore errors, log them at the WARNING log level. The server has been functioning without these, so assume they are not at the ERROR level.
* Log errors in concurrency test. If we can't initialize the configuration, treat the test as a failure.
* Undo the errcheck on configurations for now.
* Handle unchecked errors in pkg/manager.
* Resolve unchecked errors.
* Handle DLNA/DMS unchecked errors.
* Handle error checking in concurrency test. Generalize config initialization, so we can initialize a configuration without writing it to disk. Use this in the test case, since otherwise the test fails to write.
* Handle the remaining unchecked errors.
* Heed gosimple in update test.
* Use one-line if-initializer statements. While here, fix a wrong variable capture error.
* testing.T doesn't support %w; use %v instead, which is supported.
* Remove unused query builder functions. The Int/String criterion handler functions are now generalized, so there's no need to keep these functions around anymore.
* Mark filterBuilder.addRecursiveWith nolint. The function is useful in the future and no other refactor looks nicer. Keep the function around, but tell the linter to ignore it.
* Remove utils.Btoi. There are no users of this utility function.
* Return error on scan failure. If we fail to scan the row when looking for the unique checksum index, then report the error upwards.
* Fix comments on exported functions.
* Fix typos.
* Fix startup error.
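Most of the changes below apply the same one-line if-initializer pattern: run the call inside the if statement, log recoverable failures at the WARNING level, and wrap fatal ones with %w before returning them. A minimal standalone sketch of that pattern follows; the ensureDir helper, function names, and directory paths here are illustrative stand-ins, not the project's actual API.

package main

import (
    "fmt"
    "log"
    "os"
)

// ensureDir stands in for a helper such as utils.EnsureDir.
func ensureDir(path string) error {
    return os.MkdirAll(path, 0o755)
}

// prepareDownloads treats a failure as fatal and wraps it with %w,
// so callers can inspect the underlying cause with errors.Is/As.
func prepareDownloads(dir string) error {
    if err := ensureDir(dir); err != nil {
        return fmt.Errorf("could not create downloads directory %v: %w", dir, err)
    }
    return nil
}

// cleanupTmp treats a failure as recoverable and only logs it,
// mirroring the logger.Warnf calls added throughout the diff.
func cleanupTmp(tmp string) {
    if err := os.RemoveAll(tmp); err != nil {
        log.Printf("WARNING: could not empty %v: %v", tmp, err)
    }
}

func main() {
    if err := prepareDownloads("downloads"); err != nil {
        log.Fatal(err)
    }
    cleanupTmp("tmp")
}

Note that only fmt.Errorf interprets the %w wrapping verb; testing.T's Errorf does not, which is why the oshash test at the end of the diff switches from %w to %v.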
@@ -2,6 +2,7 @@ package api
 
 import (
     "context"
+    "fmt"
     "io/ioutil"
     "path/filepath"
     "strconv"
@@ -105,7 +106,9 @@ func (r *mutationResolver) BackupDatabase(ctx context.Context, input models.Back
     mgr := manager.GetInstance()
     var backupPath string
     if download {
-        utils.EnsureDir(mgr.Paths.Generated.Downloads)
+        if err := utils.EnsureDir(mgr.Paths.Generated.Downloads); err != nil {
+            return nil, fmt.Errorf("could not create backup directory %v: %w", mgr.Paths.Generated.Downloads, err)
+        }
         f, err := ioutil.TempFile(mgr.Paths.Generated.Downloads, "backup*.sqlite")
         if err != nil {
             return nil, err
@@ -2,6 +2,7 @@ package database
 
 import (
+    "database/sql"
     "fmt"
     "strings"
 
     "github.com/jmoiron/sqlx"
@@ -26,7 +27,9 @@ func createImagesChecksumIndex() error {
 
     if err == nil {
         var found bool
-        row.Scan(&found)
+        if err := row.Scan(&found); err != nil && err != sql.ErrNoRows {
+            return fmt.Errorf("error while scanning for index: %w", err)
+        }
         if found {
             return nil
         }
@@ -151,7 +151,10 @@ func Reset(databasePath string) error {
         }
     }
 
-    Initialize(databasePath)
+    if err := Initialize(databasePath); err != nil {
+        return fmt.Errorf("[reset DB] unable to initialize: %w", err)
+    }
+
     return nil
 }
@@ -256,7 +259,9 @@ func RunMigrations() error {
     }
 
     // re-initialise the database
-    Initialize(dbPath)
+    if err = Initialize(dbPath); err != nil {
+        logger.Warnf("Error re-initializing the database: %v", err)
+    }
 
     // run a vacuum on the database
     logger.Info("Performing vacuum on database")
@@ -4,6 +4,7 @@ import (
     "context"
 
     "github.com/jmoiron/sqlx"
+    "github.com/stashapp/stash/pkg/logger"
 )
 
 // WithTxn executes the provided function within a transaction. It rolls back
@@ -17,11 +18,15 @@ func WithTxn(fn func(tx *sqlx.Tx) error) error {
     defer func() {
         if p := recover(); p != nil {
             // a panic occurred, rollback and repanic
-            tx.Rollback()
+            if err := tx.Rollback(); err != nil {
+                logger.Warnf("failure when performing transaction rollback: %v", err)
+            }
             panic(p)
         } else if err != nil {
             // something went wrong, rollback
-            tx.Rollback()
+            if err := tx.Rollback(); err != nil {
+                logger.Warnf("failure when performing transaction rollback: %v", err)
+            }
         } else {
             // all good, commit
             err = tx.Commit()
@@ -416,7 +416,7 @@ func (me *Server) serveIcon(w http.ResponseWriter, r *http.Request) {
     }
 
     var scene *models.Scene
-    me.txnManager.WithReadTxn(context.Background(), func(r models.ReaderRepository) error {
+    err := me.txnManager.WithReadTxn(context.Background(), func(r models.ReaderRepository) error {
         idInt, err := strconv.Atoi(sceneId)
         if err != nil {
             return nil
@@ -424,6 +424,9 @@ func (me *Server) serveIcon(w http.ResponseWriter, r *http.Request) {
         scene, _ = r.Scene().Find(idInt)
         return nil
     })
+    if err != nil {
+        logger.Warnf("failed to execute read transaction while trying to serve an icon: %v", err)
+    }
 
     if scene == nil {
         return
@@ -552,7 +555,7 @@ func (me *Server) initMux(mux *http.ServeMux) {
     mux.HandleFunc(resPath, func(w http.ResponseWriter, r *http.Request) {
         sceneId := r.URL.Query().Get("scene")
         var scene *models.Scene
-        me.txnManager.WithReadTxn(context.Background(), func(r models.ReaderRepository) error {
+        err := me.txnManager.WithReadTxn(context.Background(), func(r models.ReaderRepository) error {
             sceneIdInt, err := strconv.Atoi(sceneId)
             if err != nil {
                 return nil
@@ -560,6 +563,9 @@ func (me *Server) initMux(mux *http.ServeMux) {
             scene, _ = r.Scene().Find(sceneIdInt)
             return nil
         })
+        if err != nil {
+            logger.Warnf("failed to execute read transaction for scene id (%v): %v", sceneId, err)
+        }
 
         if scene == nil {
             return
@@ -571,7 +577,9 @@ func (me *Server) initMux(mux *http.ServeMux) {
         w.Header().Set("content-type", `text/xml; charset="utf-8"`)
         w.Header().Set("content-length", fmt.Sprint(len(me.rootDescXML)))
         w.Header().Set("server", serverField)
-        w.Write(me.rootDescXML)
+        if k, err := w.Write(me.rootDescXML); err != nil {
+            logger.Warnf("could not write rootDescXML (wrote %v bytes of %v): %v", k, len(me.rootDescXML), err)
+        }
     })
     handleSCPDs(mux)
     mux.HandleFunc(serviceControlURL, me.serviceControlHandler)
@@ -63,7 +63,9 @@ func KillRunningEncoders(path string) {
     for _, process := range processes {
         // assume it worked, don't check for error
         logger.Infof("Killing encoder process for file: %s", path)
-        process.Kill()
+        if err := process.Kill(); err != nil {
+            logger.Warnf("failed to kill process %v: %v", process.Pid, err)
+        }
 
         // wait for the process to die before returning
         // don't wait more than a few seconds
@@ -32,7 +32,9 @@ func (s *Stream) Serve(w http.ResponseWriter, r *http.Request) {
     notify := r.Context().Done()
     go func() {
         <-notify
-        s.Process.Kill()
+        if err := s.Process.Kill(); err != nil {
+            logger.Warnf("unable to kill os process %v: %v", s.Process.Pid, err)
+        }
     }()
 
     _, err := io.Copy(w, s.Stdout)
@@ -13,6 +13,7 @@ import (
     "strings"
     "time"
 
+    "github.com/stashapp/stash/pkg/logger"
     "github.com/stashapp/stash/pkg/models"
     "github.com/stashapp/stash/pkg/utils"
     _ "golang.org/x/image/webp"
@@ -252,7 +253,9 @@ func Serve(w http.ResponseWriter, r *http.Request, path string) {
             return
         }
 
-        w.Write(data)
+        if k, err := w.Write(data); err != nil {
+            logger.Warnf("failure while serving image (wrote %v bytes out of %v): %v", k, len(data), err)
+        }
     }
 }
@@ -962,8 +962,7 @@ func (i *Instance) SetChecksumDefaultValues(defaultAlgorithm models.HashAlgorith
     viper.SetDefault(CalculateMD5, usingMD5)
 }
 
-func (i *Instance) setDefaultValues() error {
+func (i *Instance) setDefaultValues(write bool) error {
     // read data before write lock scope
     defaultDatabaseFilePath := i.GetDefaultDatabaseFilePath()
     defaultScrapersPath := i.GetDefaultScrapersPath()
@@ -989,11 +988,24 @@ func (i *Instance) setDefaultValues() error {
     // Set default scrapers and plugins paths
     viper.SetDefault(ScrapersPath, defaultScrapersPath)
     viper.SetDefault(PluginsPath, defaultPluginsPath)
-    return viper.WriteConfig()
+    if write {
+        return viper.WriteConfig()
+    }
+
+    return nil
 }
 
 // SetInitialConfig fills in missing required config fields
 func (i *Instance) SetInitialConfig() error {
+    return i.setInitialConfig(true)
+}
+
+// SetInitialMemoryConfig fills in missing required config fields without writing the configuration
+func (i *Instance) SetInitialMemoryConfig() error {
+    return i.setInitialConfig(false)
+}
+
+func (i *Instance) setInitialConfig(write bool) error {
     // generate some api keys
     const apiKeyLength = 32
@@ -1007,7 +1019,7 @@ func (i *Instance) SetInitialConfig() error {
         i.Set(SessionStoreKey, sessionStoreKey)
     }
 
-    return i.setDefaultValues()
+    return i.setDefaultValues(write)
 }
 
 func (i *Instance) FinalizeSetup() {
@@ -17,7 +17,9 @@ func TestConcurrentConfigAccess(t *testing.T) {
         wg.Add(1)
         go func(wk int) {
             for l := 0; l < loops; l++ {
-                i.SetInitialConfig()
+                if err := i.SetInitialMemoryConfig(); err != nil {
+                    t.Errorf("Failure setting initial configuration in worker %v iteration %v: %v", wk, l, err)
+                }
 
                 i.HasCredentials()
                 i.GetCPUProfilePath()
@@ -110,11 +110,15 @@ func Initialize() *singleton {
             logger.Warnf("config file %snot found. Assuming new system...", cfgFile)
         }
 
-        initFFMPEG()
+        if err = initFFMPEG(); err != nil {
+            logger.Warnf("could not initialize FFMPEG subsystem: %v", err)
+        }
 
         // if DLNA is enabled, start it now
         if instance.Config.GetDLNADefaultEnabled() {
-            instance.DLNAService.Start(nil)
+            if err := instance.DLNAService.Start(nil); err != nil {
+                logger.Warnf("could not start DLNA service: %v", err)
+            }
         }
     })
@@ -134,7 +138,9 @@ func initProfiling(cpuProfilePath string) {
     logger.Infof("profiling to %s", cpuProfilePath)
 
     // StopCPUProfile is defer called in main
-    pprof.StartCPUProfile(f)
+    if err = pprof.StartCPUProfile(f); err != nil {
+        logger.Warnf("could not start CPU profiling: %v", err)
+    }
 }
 
 func initFFMPEG() error {
@@ -182,7 +188,9 @@ func initLog() {
 // configuration has been set. Should only be called if the configuration
 // is valid.
 func (s *singleton) PostInit() error {
-    s.Config.SetInitialConfig()
+    if err := s.Config.SetInitialConfig(); err != nil {
+        logger.Warnf("could not set initial configuration: %v", err)
+    }
 
     s.Paths = paths.NewPaths(s.Config.GetGeneratedPath())
     s.RefreshConfig()
@@ -201,8 +209,12 @@ func (s *singleton) PostInit() error {
     const deleteTimeout = 1 * time.Second
 
     utils.Timeout(func() {
-        utils.EmptyDir(instance.Paths.Generated.Downloads)
-        utils.EmptyDir(instance.Paths.Generated.Tmp)
+        if err := utils.EmptyDir(instance.Paths.Generated.Downloads); err != nil {
+            logger.Warnf("could not empty Downloads directory: %v", err)
+        }
+        if err := utils.EmptyDir(instance.Paths.Generated.Tmp); err != nil {
+            logger.Warnf("could not empty Tmp directory: %v", err)
+        }
     }, deleteTimeout, func(done chan struct{}) {
         logger.Info("Please wait. Deleting temporary files...") // print
         <-done // and wait for deletion
@@ -236,11 +248,21 @@ func (s *singleton) RefreshConfig() {
     s.Paths = paths.NewPaths(s.Config.GetGeneratedPath())
     config := s.Config
     if config.Validate() == nil {
-        utils.EnsureDir(s.Paths.Generated.Screenshots)
-        utils.EnsureDir(s.Paths.Generated.Vtt)
-        utils.EnsureDir(s.Paths.Generated.Markers)
-        utils.EnsureDir(s.Paths.Generated.Transcodes)
-        utils.EnsureDir(s.Paths.Generated.Downloads)
+        if err := utils.EnsureDir(s.Paths.Generated.Screenshots); err != nil {
+            logger.Warnf("could not create directory for Screenshots: %v", err)
+        }
+        if err := utils.EnsureDir(s.Paths.Generated.Vtt); err != nil {
+            logger.Warnf("could not create directory for VTT: %v", err)
+        }
+        if err := utils.EnsureDir(s.Paths.Generated.Markers); err != nil {
+            logger.Warnf("could not create directory for Markers: %v", err)
+        }
+        if err := utils.EnsureDir(s.Paths.Generated.Transcodes); err != nil {
+            logger.Warnf("could not create directory for Transcodes: %v", err)
+        }
+        if err := utils.EnsureDir(s.Paths.Generated.Downloads); err != nil {
+            logger.Warnf("could not create directory for Downloads: %v", err)
+        }
     }
 }
@@ -304,7 +326,9 @@ func (s *singleton) Setup(input models.SetupInput) error {
 
     s.Config.FinalizeSetup()
 
-    initFFMPEG()
+    if err := initFFMPEG(); err != nil {
+        return fmt.Errorf("error initializing FFMPEG subsystem: %v", err)
+    }
 
     return nil
 }
@@ -174,7 +174,9 @@ func (t *ExportTask) Start(wg *sync.WaitGroup) {
 
 func (t *ExportTask) generateDownload() error {
     // zip the files and register a download link
-    utils.EnsureDir(instance.Paths.Generated.Downloads)
+    if err := utils.EnsureDir(instance.Paths.Generated.Downloads); err != nil {
+        return err
+    }
     z, err := ioutil.TempFile(instance.Paths.Generated.Downloads, "export*.zip")
     if err != nil {
         return err
@@ -78,7 +78,9 @@ func (t *GenerateMarkersTask) generateSceneMarkers() {
 
     // Make the folder for the scenes markers
     markersFolder := filepath.Join(instance.Paths.Generated.Markers, sceneHash)
-    utils.EnsureDir(markersFolder)
+    if err := utils.EnsureDir(markersFolder); err != nil {
+        logger.Warnf("could not create the markers folder (%v): %v", markersFolder, err)
+    }
 
     for i, sceneMarker := range sceneMarkers {
         index := i + 1
@@ -153,7 +153,7 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag() {
             partial.URL = &value
         }
 
-        t.txnManager.WithTxn(context.TODO(), func(r models.Repository) error {
+        txnErr := t.txnManager.WithTxn(context.TODO(), func(r models.Repository) error {
             _, err := r.Performer().Update(partial)
 
             if !t.refresh {
@@ -188,6 +188,9 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag() {
             }
             return err
         })
+        if txnErr != nil {
+            logger.Warnf("failure to execute partial update of performer: %v", err)
+        }
     } else if t.name != nil && performer.Name != nil {
         currentTime := time.Now()
         newPerformer := models.Performer{
@@ -1,6 +1,10 @@
 package models
 
-import "context"
+import (
+    "context"
+
+    "github.com/stashapp/stash/pkg/logger"
+)
 
 type Transaction interface {
     Begin() error
@@ -30,11 +34,15 @@ func WithTxn(txn Transaction, fn func(r Repository) error) error {
     defer func() {
         if p := recover(); p != nil {
             // a panic occurred, rollback and repanic
-            txn.Rollback()
+            if err := txn.Rollback(); err != nil {
+                logger.Warnf("error while trying to roll back transaction: %v", err)
+            }
             panic(p)
         } else if err != nil {
             // something went wrong, rollback
-            txn.Rollback()
+            if err := txn.Rollback(); err != nil {
+                logger.Warnf("error while trying to roll back transaction: %v", err)
+            }
         } else {
             // all good, commit
             err = txn.Commit()
@@ -54,11 +62,15 @@ func WithROTxn(txn ReadTransaction, fn func(r ReaderRepository) error) error {
     defer func() {
         if p := recover(); p != nil {
             // a panic occurred, rollback and repanic
-            txn.Rollback()
+            if err := txn.Rollback(); err != nil {
+                logger.Warnf("error while trying to roll back RO transaction: %v", err)
+            }
             panic(p)
         } else if err != nil {
             // something went wrong, rollback
-            txn.Rollback()
+            if err := txn.Rollback(); err != nil {
+                logger.Warnf("error while trying to roll back RO transaction: %v", err)
+            }
         } else {
             // all good, commit
             err = txn.Commit()
@@ -48,7 +48,9 @@ func (s *scriptScraper) runScraperScript(inString string, out interface{}) error
     go func() {
         defer stdin.Close()
 
-        io.WriteString(stdin, inString)
+        if n, err := io.WriteString(stdin, inString); err != nil {
+            logger.Warnf("failure to write full input to script (wrote %v bytes out of %v): %v", n, len(inString), err)
+        }
     }()
 
     stderr, err := cmd.StderrPipe()
@@ -235,7 +235,9 @@ func (s *xpathScraper) loadURL(url string) (*html.Node, error) {
 
     if err == nil && s.config.DebugOptions != nil && s.config.DebugOptions.PrintHTML {
         var b bytes.Buffer
-        html.Render(&b, ret)
+        if err := html.Render(&b, ret); err != nil {
+            logger.Warnf("could not render HTML: %v", err)
+        }
         logger.Infof("loadURL (%s) response: \n%s", url, b.String())
     }
@@ -193,6 +193,7 @@ func (f *filterBuilder) addWith(sql string, args ...interface{}) {
 }
 
 // addRecursiveWith adds a with clause and arguments to the filter, and sets it to recursive
+//nolint:unused
 func (f *filterBuilder) addRecursiveWith(sql string, args ...interface{}) {
     if sql == "" {
         return
@@ -1,11 +1,7 @@
 package sqlite
 
 import (
-    "fmt"
-    "regexp"
-    "strings"
-
     "github.com/stashapp/stash/pkg/models"
 )
 
 type queryBuilder struct {
@@ -139,67 +135,3 @@ func (qb *queryBuilder) addFilter(f *filterBuilder) {
 
     qb.addJoins(f.getAllJoins()...)
 }
-
-func (qb *queryBuilder) handleIntCriterionInput(c *models.IntCriterionInput, column string) {
-    if c != nil {
-        clause, args := getIntCriterionWhereClause(column, *c)
-        qb.addWhere(clause)
-        qb.addArg(args...)
-    }
-}
-
-func (qb *queryBuilder) handleStringCriterionInput(c *models.StringCriterionInput, column string) {
-    if c != nil {
-        if modifier := c.Modifier; c.Modifier.IsValid() {
-            switch modifier {
-            case models.CriterionModifierIncludes:
-                clause, thisArgs := getSearchBinding([]string{column}, c.Value, false)
-                qb.addWhere(clause)
-                qb.addArg(thisArgs...)
-            case models.CriterionModifierExcludes:
-                clause, thisArgs := getSearchBinding([]string{column}, c.Value, true)
-                qb.addWhere(clause)
-                qb.addArg(thisArgs...)
-            case models.CriterionModifierEquals:
-                qb.addWhere(column + " LIKE ?")
-                qb.addArg(c.Value)
-            case models.CriterionModifierNotEquals:
-                qb.addWhere(column + " NOT LIKE ?")
-                qb.addArg(c.Value)
-            case models.CriterionModifierMatchesRegex:
-                if _, err := regexp.Compile(c.Value); err != nil {
-                    qb.err = err
-                    return
-                }
-                qb.addWhere(fmt.Sprintf("(%s IS NOT NULL AND %[1]s regexp ?)", column))
-                qb.addArg(c.Value)
-            case models.CriterionModifierNotMatchesRegex:
-                if _, err := regexp.Compile(c.Value); err != nil {
-                    qb.err = err
-                    return
-                }
-                qb.addWhere(fmt.Sprintf("(%s IS NULL OR %[1]s NOT regexp ?)", column))
-                qb.addArg(c.Value)
-            case models.CriterionModifierIsNull:
-                qb.addWhere("(" + column + " IS NULL OR TRIM(" + column + ") = '')")
-            case models.CriterionModifierNotNull:
-                qb.addWhere("(" + column + " IS NOT NULL AND TRIM(" + column + ") != '')")
-            default:
-                clause, count := getSimpleCriterionClause(modifier, "?")
-                qb.addWhere(column + " " + clause)
-                if count == 1 {
-                    qb.addArg(c.Value)
-                }
-            }
-        }
-    }
-}
-
-func (qb *queryBuilder) handleCountCriterion(countFilter *models.IntCriterionInput, primaryTable, joinTable, primaryFK string) {
-    if countFilter != nil {
-        clause, args := getCountCriterionClause(primaryTable, joinTable, primaryFK, *countFilter)
-
-        qb.addWhere(clause)
-        qb.addArg(args...)
-    }
-}
@@ -2,11 +2,12 @@ package tag
 
 import (
     "fmt"
+    "testing"
+
     "github.com/stashapp/stash/pkg/models"
     "github.com/stashapp/stash/pkg/models/mocks"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/mock"
-    "testing"
 )
 
 var testUniqueHierarchyTags = map[int]*models.Tag{
@@ -211,8 +212,7 @@ func testEnsureUniqueHierarchy(t *testing.T, tc testUniqueHierarchyCase, queryPa
     mockTagReader := &mocks.TagReaderWriter{}
 
     var parentIDs, childIDs []int
-    var find map[int]*models.Tag
-    find = make(map[int]*models.Tag)
+    find := make(map[int]*models.Tag)
     if tc.parents != nil {
         parentIDs = make([]int, 0)
         for _, parent := range tc.parents {
@@ -262,7 +262,7 @@ func testEnsureUniqueHierarchy(t *testing.T, tc testUniqueHierarchyCase, queryPa
         }
         return nil
     }, func(tagID int, excludeIDs []int) error {
-        for id, _ := range tc.onFindAllAncestors {
+        for id := range tc.onFindAllAncestors {
             if id == tagID {
                 return nil
             }
@@ -278,7 +278,7 @@ func testEnsureUniqueHierarchy(t *testing.T, tc testUniqueHierarchyCase, queryPa
         }
         return nil
     }, func(tagID int, excludeIDs []int) error {
-        for id, _ := range tc.onFindAllDescendants {
+        for id := range tc.onFindAllDescendants {
            if id == tagID {
                return nil
            }
@@ -1,13 +1,5 @@
 package utils
 
-// Btoi transforms a boolean to an int. 1 for true, false otherwise
-func Btoi(b bool) int {
-    if b {
-        return 1
-    }
-    return 0
-}
-
 // IsTrue returns true if the bool pointer is not nil and true.
 func IsTrue(b *bool) bool {
     return b != nil && *b
@@ -12,7 +12,7 @@ func TestOshashEmpty(t *testing.T) {
     want := "0000000000000000"
     got, err := oshash(size, head, tail)
     if err != nil {
-        t.Errorf("TestOshashEmpty: Error from oshash: %w", err)
+        t.Errorf("TestOshashEmpty: Error from oshash: %v", err)
     }
     if got != want {
         t.Errorf("TestOshashEmpty: oshash(0, 0, 0) = %q; want %q", got, want)