Errcheck phase 1 (#1715)

* Avoid redundant logging in migrations

Return the error and let the caller handle logging if needed.

While here, defer m.Close() to the function boundary.
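
As a minimal sketch of the shape this moves toward (runMigrations and the
migrator type are hypothetical stand-ins, not names from this commit, and fmt
is assumed to be imported): errors are returned to the caller, and Close is
deferred to the function boundary so it runs on every return path.

  // Hypothetical helper: return the error instead of logging it here,
  // and defer Close so it runs on every return path.
  func runMigrations(m *migrator) error {
      defer m.Close()
      if err := m.Up(); err != nil {
          return fmt.Errorf("running migrations: %w", err)
      }
      return nil
  }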

* Treat errors as values

Use %v rather than %s and pass the errors directly.
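
Concretely, this is the difference between formatting err.Error() through %s
and handing the error value itself to the logger via %v (logger.Errorf here
stands for the project's pkg/logger):

  // Before: stringify the error explicitly.
  logger.Errorf("migration failed: %s", err.Error())

  // After: pass the error as a value; %v formats it via its Error method.
  logger.Errorf("migration failed: %v", err)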

* Generate a wrapped error on stat-failure
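
A hedged sketch of that pattern using only the standard library (the
surrounding path variable is assumed, and os and fmt are imported): wrap the
os.Stat error with %w so callers can still unwrap the underlying cause.

  // Hypothetical example: wrap the stat error rather than discarding it.
  if _, err := os.Stat(path); err != nil {
      return fmt.Errorf("stat %q: %w", path, err)
  }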

* Log 3 unchecked errors

Rather than ignoring these errors, log them at the WARNING log level.

The server has been functioning without these checks, so assume they do not
warrant the ERROR level.

* Propagate errors upward

Failures in path generation were ignored. Propagate the errors up the call
stack so they can be handled at the orchestration level.

* Warn on errors

Log errors rather than swallowing them.

Errors are logged at the Warn level for now.

* Check error when creating test databases

Use the built-in log package and stop the program fatally on error.
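
A minimal sketch of the intent, assuming a hypothetical createTestDatabase
helper (with log, os, and testing imported); the standard library's log.Fatalf
logs the message and exits, which suits TestMain-style setup code.

  // Hypothetical test setup: abort the test binary if the database can't be created.
  func TestMain(m *testing.M) {
      if err := createTestDatabase(); err != nil {
          log.Fatalf("creating test database: %v", err)
      }
      os.Exit(m.Run())
  }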

* Add warnings to unchecked task errors

Focus on the task system in a single commit, logging unchecked
errors as warnings.

* Warn-on-error in API routes

Look through the API routes and make sure errors are logged when they occur.
Prefer the Warn log level because none of these errors has proven fatal in
the system so far.

* Propagate error when adding Util API

* Propagate error on adding util API

* Return unhandled error

* JS log API: propagate and log errors

* JS Plugins: log GQL addition failures.

* Warn on failure to write to stdin

* Warn on failure to stop task

* Wrap viper.BindEnv

The current viper code only errors if no key name is provided, so the call
should never fail. Rewrite the code flow to go through a helper that panics
on failure.

This removes the unchecked-error warnings from this part of the code.

* Log errors in concurrency test

If we can't initialize the configuration, treat the test as a failure.

* Warn on errors in configuration code

* Plug an unchecked error in gallery zip walking

* Warn on screenshot serving failure

* Warn on encoder screenshot failure

* Warn on errors in path-handling code

* Undo the errcheck on configurations for now.

* Use one-line initializers where applicable

rather than using

  err := f()
  if err != nil { ..

prefer the shorter

  if err := f(); err != nil { ..

provided f() isn't too long a name and doesn't wrap a function literal with a body.
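
As a fuller illustration (ensureDir is a stand-in name, not a function from
this commit), the scoped form keeps err local to the if statement:

  // Before: err escapes into the enclosing scope.
  err := ensureDir(tmp)
  if err != nil {
      return err
  }

  // After: err is scoped to the if statement.
  if err := ensureDir(tmp); err != nil {
      return err
  }
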
Authored by SmallCoccinelle on 2021-09-21 01:34:25 +02:00, committed by GitHub
parent af6232ec97, commit 87709fd018
33 changed files with 296 additions and 107 deletions

View File

@@ -16,6 +16,7 @@ import (
"github.com/spf13/viper"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/manager/paths"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/utils"
@@ -453,7 +454,10 @@ func (i *Instance) GetStashBoxes() []*models.StashBox {
i.RLock()
defer i.RUnlock()
var boxes []*models.StashBox
viper.UnmarshalKey(StashBoxes, &boxes)
if err := viper.UnmarshalKey(StashBoxes, &boxes); err != nil {
logger.Warnf("error in unmarshalkey: %v", err)
}
return boxes
}
@@ -801,7 +805,9 @@ func (i *Instance) SetCSS(css string) {
buf := []byte(css)
ioutil.WriteFile(fn, buf, 0777)
if err := ioutil.WriteFile(fn, buf, 0777); err != nil {
logger.Warnf("error while writing %v bytes to %v: %v", len(buf), fn, err)
}
}
func (i *Instance) GetCSSEnabled() bool {

View File

@@ -13,9 +13,9 @@ func TestConcurrentConfigAccess(t *testing.T) {
//const loops = 1000
const loops = 200
var wg sync.WaitGroup
for t := 0; t < workers; t++ {
for k := 0; k < workers; k++ {
wg.Add(1)
go func() {
go func(wk int) {
for l := 0; l < loops; l++ {
i.SetInitialConfig()
@@ -93,7 +93,7 @@ func TestConcurrentConfigAccess(t *testing.T) {
i.Set(MaxUploadSize, i.GetMaxUploadSize())
}
wg.Done()
}()
}(k)
}
wg.Wait()

View File

@@ -112,16 +112,22 @@ func initFlags() flagStruct {
}
func initEnvs() {
viper.SetEnvPrefix("stash") // will be uppercased automatically
viper.BindEnv("host") // STASH_HOST
viper.BindEnv("port") // STASH_PORT
viper.BindEnv("external_host") // STASH_EXTERNAL_HOST
viper.BindEnv("generated") // STASH_GENERATED
viper.BindEnv("metadata") // STASH_METADATA
viper.BindEnv("cache") // STASH_CACHE
viper.SetEnvPrefix("stash") // will be uppercased automatically
bindEnv("host") // STASH_HOST
bindEnv("port") // STASH_PORT
bindEnv("external_host") // STASH_EXTERNAL_HOST
bindEnv("generated") // STASH_GENERATED
bindEnv("metadata") // STASH_METADATA
bindEnv("cache") // STASH_CACHE
// only set stash config flag if not already set
if instance.GetStashPaths() == nil {
viper.BindEnv("stash") // STASH_STASH
bindEnv("stash") // STASH_STASH
}
}
func bindEnv(key string) {
if err := viper.BindEnv(key); err != nil {
panic(fmt.Sprintf("unable to set environment key (%v): %v", key, err))
}
}

View File

@@ -61,10 +61,13 @@ func walkGalleryZip(path string, walkFunc func(file *zip.File) error) error {
func countImagesInZip(path string) int {
ret := 0
walkGalleryZip(path, func(file *zip.File) error {
err := walkGalleryZip(path, func(file *zip.File) error {
ret++
return nil
})
if err != nil {
logger.Warnf("Error while walking gallery zip: %v", err)
}
return ret
}

View File

@@ -163,7 +163,9 @@ func (s *singleton) Generate(ctx context.Context, input models.GenerateMetadataI
if err := s.validateFFMPEG(); err != nil {
return 0, err
}
instance.Paths.Generated.EnsureTmpDir()
if err := instance.Paths.Generated.EnsureTmpDir(); err != nil {
logger.Warnf("could not generate temporary directory: %v", err)
}
sceneIDs, err := utils.StringSliceToIntSlice(input.SceneIDs)
if err != nil {
@@ -250,14 +252,18 @@ func (s *singleton) Generate(ctx context.Context, input models.GenerateMetadataI
// Start measuring how long the generate has taken. (consider moving this up)
start := time.Now()
instance.Paths.Generated.EnsureTmpDir()
if err = instance.Paths.Generated.EnsureTmpDir(); err != nil {
logger.Warnf("could not create temprary directory: %v", err)
}
for _, scene := range scenes {
progress.Increment()
if job.IsCancelled(ctx) {
logger.Info("Stopping due to user request")
wg.Wait()
instance.Paths.Generated.EmptyTmpDir()
if err := instance.Paths.Generated.EmptyTmpDir(); err != nil {
logger.Warnf("failure emptying temporary directory: %v", err)
}
return
}
@@ -340,7 +346,9 @@ func (s *singleton) Generate(ctx context.Context, input models.GenerateMetadataI
if job.IsCancelled(ctx) {
logger.Info("Stopping due to user request")
wg.Wait()
instance.Paths.Generated.EmptyTmpDir()
if err := instance.Paths.Generated.EmptyTmpDir(); err != nil {
logger.Warnf("failure emptying temporary directory: %v", err)
}
elapsed := time.Since(start)
logger.Info(fmt.Sprintf("Generate finished (%s)", elapsed))
return
@@ -365,7 +373,9 @@ func (s *singleton) Generate(ctx context.Context, input models.GenerateMetadataI
wg.Wait()
instance.Paths.Generated.EmptyTmpDir()
if err = instance.Paths.Generated.EmptyTmpDir(); err != nil {
logger.Warnf("failure emptying temporary directory: %v", err)
}
elapsed := time.Since(start)
logger.Info(fmt.Sprintf("Generate finished (%s)", elapsed))
})
@@ -383,7 +393,9 @@ func (s *singleton) GenerateScreenshot(ctx context.Context, sceneId string, at f
// generate default screenshot if at is nil
func (s *singleton) generateScreenshot(ctx context.Context, sceneId string, at *float64) int {
instance.Paths.Generated.EnsureTmpDir()
if err := instance.Paths.Generated.EnsureTmpDir(); err != nil {
logger.Warnf("failure generating screenshot: %v", err)
}
j := job.MakeJobExec(func(ctx context.Context, progress *job.Progress) {
sceneIdInt, err := strconv.Atoi(sceneId)

View File

@@ -5,6 +5,7 @@ import (
"io/ioutil"
"path/filepath"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/utils"
)
@@ -37,26 +38,30 @@ func (gp *generatedPaths) GetTmpPath(fileName string) string {
return filepath.Join(gp.Tmp, fileName)
}
func (gp *generatedPaths) EnsureTmpDir() {
utils.EnsureDir(gp.Tmp)
func (gp *generatedPaths) EnsureTmpDir() error {
return utils.EnsureDir(gp.Tmp)
}
func (gp *generatedPaths) EmptyTmpDir() {
utils.EmptyDir(gp.Tmp)
func (gp *generatedPaths) EmptyTmpDir() error {
return utils.EmptyDir(gp.Tmp)
}
func (gp *generatedPaths) RemoveTmpDir() {
utils.RemoveDir(gp.Tmp)
func (gp *generatedPaths) RemoveTmpDir() error {
return utils.RemoveDir(gp.Tmp)
}
func (gp *generatedPaths) TempDir(pattern string) (string, error) {
gp.EnsureTmpDir()
if err := gp.EnsureTmpDir(); err != nil {
logger.Warnf("Could not ensure existence of a temporary directory: %v", err)
}
ret, err := ioutil.TempDir(gp.Tmp, pattern)
if err != nil {
return "", err
}
utils.EmptyDir(ret)
if err = utils.EmptyDir(ret); err != nil {
logger.Warnf("could not recursively empty dir: %v", err)
}
return ret, nil
}

View File

@@ -3,6 +3,7 @@ package paths
import (
"path/filepath"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/utils"
)
@@ -43,14 +44,30 @@ func GetJSONPaths(baseDir string) *JSONPaths {
func EnsureJSONDirs(baseDir string) {
jsonPaths := GetJSONPaths(baseDir)
utils.EnsureDir(jsonPaths.Metadata)
utils.EnsureDir(jsonPaths.Scenes)
utils.EnsureDir(jsonPaths.Images)
utils.EnsureDir(jsonPaths.Galleries)
utils.EnsureDir(jsonPaths.Performers)
utils.EnsureDir(jsonPaths.Studios)
utils.EnsureDir(jsonPaths.Movies)
utils.EnsureDir(jsonPaths.Tags)
if err := utils.EnsureDir(jsonPaths.Metadata); err != nil {
logger.Warnf("couldn't create directories for Metadata: %v", err)
}
if err := utils.EnsureDir(jsonPaths.Scenes); err != nil {
logger.Warnf("couldn't create directories for Scenes: %v", err)
}
if err := utils.EnsureDir(jsonPaths.Images); err != nil {
logger.Warnf("couldn't create directories for Images: %v", err)
}
if err := utils.EnsureDir(jsonPaths.Galleries); err != nil {
logger.Warnf("couldn't create directories for Galleries: %v", err)
}
if err := utils.EnsureDir(jsonPaths.Performers); err != nil {
logger.Warnf("couldn't create directories for Performers: %v", err)
}
if err := utils.EnsureDir(jsonPaths.Studios); err != nil {
logger.Warnf("couldn't create directories for Studios: %v", err)
}
if err := utils.EnsureDir(jsonPaths.Movies); err != nil {
logger.Warnf("couldn't create directories for Movies: %v", err)
}
if err := utils.EnsureDir(jsonPaths.Tags); err != nil {
logger.Warnf("couldn't create directories for Tags: %v", err)
}
}
func (jp *JSONPaths) PerformerJSONPath(checksum string) string {

View File

@@ -91,10 +91,16 @@ func (s *SceneServer) ServeScreenshot(scene *models.Scene, w http.ResponseWriter
http.ServeFile(w, r, filepath)
} else {
var cover []byte
s.TXNManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
err := s.TXNManager.WithReadTxn(r.Context(), func(repo models.ReaderRepository) error {
cover, _ = repo.Scene().GetCover(scene.ID)
return nil
})
utils.ServeImage(cover, w, r)
if err != nil {
logger.Warnf("read transaction failed while serving screenshot: %v", err)
}
if err = utils.ServeImage(cover, w, r); err != nil {
logger.Warnf("unable to serve screenshot image: %v", err)
}
}
}

View File

@@ -2,6 +2,7 @@ package manager
import (
"github.com/stashapp/stash/pkg/ffmpeg"
"github.com/stashapp/stash/pkg/logger"
)
func makeScreenshot(probeResult ffmpeg.VideoFile, outputPath string, quality int, width int, time float64) {
@@ -12,5 +13,8 @@ func makeScreenshot(probeResult ffmpeg.VideoFile, outputPath string, quality int
Time: time,
Width: width,
}
encoder.Screenshot(probeResult, options)
if err := encoder.Screenshot(probeResult, options); err != nil {
logger.Warnf("[encoder] failure to generate screenshot: %v", err)
}
}

View File

@@ -126,7 +126,7 @@ func (t *ExportTask) Start(wg *sync.WaitGroup) {
paths.EnsureJSONDirs(t.baseDir)
t.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
txnErr := t.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
// include movie scenes and gallery images
if !t.full {
// only include movie scenes if includeDependencies is also set
@@ -154,6 +154,9 @@ func (t *ExportTask) Start(wg *sync.WaitGroup) {
return nil
})
if txnErr != nil {
logger.Warnf("error while running export transaction: %v", txnErr)
}
if err := t.json.saveMappings(t.Mappings); err != nil {
logger.Errorf("[mappings] failed to save json: %s", err.Error())
@@ -202,17 +205,24 @@ func (t *ExportTask) zipFiles(w io.Writer) error {
return err
}
filepath.Walk(t.json.json.Tags, t.zipWalkFunc(u.json.Tags, z))
filepath.Walk(t.json.json.Galleries, t.zipWalkFunc(u.json.Galleries, z))
filepath.Walk(t.json.json.Performers, t.zipWalkFunc(u.json.Performers, z))
filepath.Walk(t.json.json.Studios, t.zipWalkFunc(u.json.Studios, z))
filepath.Walk(t.json.json.Movies, t.zipWalkFunc(u.json.Movies, z))
filepath.Walk(t.json.json.Scenes, t.zipWalkFunc(u.json.Scenes, z))
filepath.Walk(t.json.json.Images, t.zipWalkFunc(u.json.Images, z))
walkWarn(t.json.json.Tags, t.zipWalkFunc(u.json.Tags, z))
walkWarn(t.json.json.Galleries, t.zipWalkFunc(u.json.Galleries, z))
walkWarn(t.json.json.Performers, t.zipWalkFunc(u.json.Performers, z))
walkWarn(t.json.json.Studios, t.zipWalkFunc(u.json.Studios, z))
walkWarn(t.json.json.Movies, t.zipWalkFunc(u.json.Movies, z))
walkWarn(t.json.json.Scenes, t.zipWalkFunc(u.json.Scenes, z))
walkWarn(t.json.json.Images, t.zipWalkFunc(u.json.Images, z))
return nil
}
// like filepath.Walk but issue a warning on error
func walkWarn(root string, fn filepath.WalkFunc) {
if err := filepath.Walk(root, fn); err != nil {
logger.Warnf("error walking structure %v: %v", root, err)
}
}
func (t *ExportTask) zipWalkFunc(outDir string, z *zip.Writer) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {

View File

@@ -160,7 +160,9 @@ func (t *ImportTask) unzipFile() error {
fn := filepath.Join(t.BaseDir, f.Name)
if f.FileInfo().IsDir() {
os.MkdirAll(fn, os.ModePerm)
if err := os.MkdirAll(fn, os.ModePerm); err != nil {
logger.Warnf("couldn't create directory %v while unzipping import file: %v", fn, err)
}
continue
}

View File

@@ -87,7 +87,9 @@ func (j *ScanJob) Execute(ctx context.Context, progress *job.Progress) {
galleries = append(galleries, path)
}
instance.Paths.Generated.EnsureTmpDir()
if err := instance.Paths.Generated.EnsureTmpDir(); err != nil {
logger.Warnf("couldn't create temporary directory: %v", err)
}
wg.Add()
task := ScanTask{
@@ -126,7 +128,11 @@ func (j *ScanJob) Execute(ctx context.Context, progress *job.Progress) {
}
wg.Wait()
instance.Paths.Generated.EmptyTmpDir()
if err := instance.Paths.Generated.EmptyTmpDir(); err != nil {
logger.Warnf("couldn't empty temporary directory: %v", err)
}
elapsed := time.Since(start)
logger.Info(fmt.Sprintf("Scan finished (%s)", elapsed))
@@ -753,7 +759,7 @@ func (t *ScanTask) scanScene() *models.Scene {
// check for scene by checksum and oshash - MD5 should be
// redundant, but check both
t.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
txnErr := t.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
qb := r.Scene()
if checksum != "" {
s, _ = qb.FindByChecksum(checksum)
@@ -765,6 +771,9 @@ func (t *ScanTask) scanScene() *models.Scene {
return nil
})
if txnErr != nil {
logger.Warnf("error in read transaction: %v", txnErr)
}
sceneHash := oshash
@@ -1320,7 +1329,7 @@ func (t *ScanTask) doesPathExist() bool {
gExt := config.GetGalleryExtensions()
ret := false
t.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
txnErr := t.TxnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
if matchExtension(t.FilePath, gExt) {
gallery, _ := r.Gallery().FindByPath(t.FilePath)
if gallery != nil {
@@ -1340,6 +1349,9 @@ func (t *ScanTask) doesPathExist() bool {
return nil
})
if txnErr != nil {
logger.Warnf("error while executing read transaction: %v", txnErr)
}
return ret
}

View File

@@ -47,7 +47,7 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag() {
if t.refresh {
var performerID string
t.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
txnErr := t.txnManager.WithReadTxn(context.TODO(), func(r models.ReaderRepository) error {
stashids, _ := r.Performer().GetStashIDs(t.performer.ID)
for _, id := range stashids {
if id.Endpoint == t.box.Endpoint {
@@ -56,6 +56,9 @@ func (t *StashBoxPerformerTagTask) stashBoxPerformerTag() {
}
return nil
})
if txnErr != nil {
logger.Warnf("error while executing read transaction: %v", err)
}
if performerID != "" {
performer, err = client.FindStashBoxPerformerByID(performerID)
}