[Files Refactor] Performance tuning (#2813)

* Do database transactions in the same thread; retry on a locked database
* Remove captions from the SlimSceneData fragment
* Fix tracing
* Use WHERE IN instead of individual selects
* Remove scenes_query view
* Remove image query view
* Remove gallery query view
* Use WHERE IN for FindMany
* Don't interrupt scanning zip files
* Fix image filesize sort
This commit is contained in:
WithoutPants
2022-08-11 16:14:57 +10:00
parent 87167736f6
commit 9b31b20fed
19 changed files with 715 additions and 680 deletions

View File

@@ -9,10 +9,6 @@ fragment SlimSceneData on Scene {
organized organized
interactive interactive
interactive_speed interactive_speed
captions {
language_code
caption_type
}
files { files {
...VideoFileData ...VideoFileData

View File

@@ -15,7 +15,11 @@ import (
"github.com/stashapp/stash/pkg/txn" "github.com/stashapp/stash/pkg/txn"
) )
const scanQueueSize = 200000 const (
scanQueueSize = 200000
// maximum number of times to retry in the event of a locked database
maxRetries = 1000
)
// Repository provides access to storage methods for files and folders. // Repository provides access to storage methods for files and folders.
type Repository struct { type Repository struct {
@@ -86,7 +90,7 @@ type scanJob struct {
zipPathToID sync.Map zipPathToID sync.Map
count int count int
txnMutex sync.Mutex txnRetryer txn.Retryer
} }
// ScanOptions provides options for scanning files. // ScanOptions provides options for scanning files.
@@ -113,6 +117,10 @@ func (s *Scanner) Scan(ctx context.Context, handlers []Handler, options ScanOpti
handlers: handlers, handlers: handlers,
ProgressReports: progressReporter, ProgressReports: progressReporter,
options: options, options: options,
txnRetryer: txn.Retryer{
Manager: s.Repository,
Retries: maxRetries,
},
} }
job.execute(ctx) job.execute(ctx)
@@ -126,10 +134,7 @@ type scanFile struct {
} }
func (s *scanJob) withTxn(ctx context.Context, fn func(ctx context.Context) error) error { func (s *scanJob) withTxn(ctx context.Context, fn func(ctx context.Context) error) error {
// get exclusive access to the database return s.txnRetryer.WithTxn(ctx, fn)
s.txnMutex.Lock()
defer s.txnMutex.Unlock()
return txn.WithTxn(ctx, s.Repository, fn)
} }
func (s *scanJob) withDB(ctx context.Context, fn func(ctx context.Context) error) error { func (s *scanJob) withDB(ctx context.Context, fn func(ctx context.Context) error) error {
@@ -157,20 +162,6 @@ func (s *scanJob) execute(ctx context.Context) {
logger.Infof("Finished adding files to queue. %d files queued", s.count) logger.Infof("Finished adding files to queue. %d files queued", s.count)
}() }()
done := make(chan struct{}, 1)
go func() {
if err := s.processDBOperations(ctx); err != nil {
if errors.Is(err, context.Canceled) {
return
}
logger.Errorf("error processing database operations for scan: %v", err)
}
close(done)
}()
if err := s.processQueue(ctx); err != nil { if err := s.processQueue(ctx); err != nil {
if errors.Is(err, context.Canceled) { if errors.Is(err, context.Canceled) {
return return
@@ -179,9 +170,6 @@ func (s *scanJob) execute(ctx context.Context) {
logger.Errorf("error scanning files: %v", err) logger.Errorf("error scanning files: %v", err)
return return
} }
// wait for database operations to complete
<-done
} }
func (s *scanJob) queueFiles(ctx context.Context, paths []string) error { func (s *scanJob) queueFiles(ctx context.Context, paths []string) error {
@@ -247,7 +235,10 @@ func (s *scanJob) queueFileFunc(ctx context.Context, f FS, zipFile *scanFile) fs
if info.IsDir() { if info.IsDir() {
// handle folders immediately // handle folders immediately
if err := s.handleFolder(ctx, ff); err != nil { if err := s.handleFolder(ctx, ff); err != nil {
logger.Errorf("error processing %q: %v", path, err) if !errors.Is(err, context.Canceled) {
logger.Errorf("error processing %q: %v", path, err)
}
// skip the directory since we won't be able to process the files anyway // skip the directory since we won't be able to process the files anyway
return fs.SkipDir return fs.SkipDir
} }
@@ -259,7 +250,9 @@ func (s *scanJob) queueFileFunc(ctx context.Context, f FS, zipFile *scanFile) fs
if zipFile != nil { if zipFile != nil {
s.ProgressReports.ExecuteTask("Scanning "+path, func() { s.ProgressReports.ExecuteTask("Scanning "+path, func() {
if err := s.handleFile(ctx, ff); err != nil { if err := s.handleFile(ctx, ff); err != nil {
logger.Errorf("error processing %q: %v", path, err) if !errors.Is(err, context.Canceled) {
logger.Errorf("error processing %q: %v", path, err)
}
// don't return an error, just skip the file // don't return an error, just skip the file
} }
}) })
@@ -355,18 +348,6 @@ func (s *scanJob) incrementProgress() {
} }
} }
func (s *scanJob) processDBOperations(ctx context.Context) error {
for fn := range s.dbQueue {
if err := ctx.Err(); err != nil {
return err
}
_ = s.withTxn(ctx, fn)
}
return nil
}
func (s *scanJob) processQueueItem(ctx context.Context, f scanFile) { func (s *scanJob) processQueueItem(ctx context.Context, f scanFile) {
s.ProgressReports.ExecuteTask("Scanning "+f.Path, func() { s.ProgressReports.ExecuteTask("Scanning "+f.Path, func() {
var err error var err error
@@ -376,7 +357,7 @@ func (s *scanJob) processQueueItem(ctx context.Context, f scanFile) {
err = s.handleFile(ctx, f) err = s.handleFile(ctx, f)
} }
if err != nil { if err != nil && !errors.Is(err, context.Canceled) {
logger.Errorf("error processing %q: %v", f.Path, err) logger.Errorf("error processing %q: %v", f.Path, err)
} }
}) })
@@ -552,7 +533,13 @@ func (s *scanJob) handleFile(ctx context.Context, f scanFile) error {
if ff != nil && s.isZipFile(f.info.Name()) { if ff != nil && s.isZipFile(f.info.Name()) {
f.BaseFile = ff.Base() f.BaseFile = ff.Base()
if err := s.scanZipFile(ctx, f); err != nil {
// scan zip files with a different context that is not cancellable
// cancelling while scanning zip file contents results in the scan
// contents being partially completed
zipCtx := context.Background()
if err := s.scanZipFile(zipCtx, f); err != nil {
logger.Errorf("Error scanning zip file %q: %v", f.Path, err) logger.Errorf("Error scanning zip file %q: %v", f.Path, err)
} }
} }
@@ -638,7 +625,7 @@ func (s *scanJob) onNewFile(ctx context.Context, f scanFile) (File, error) {
} }
// if not renamed, queue file for creation // if not renamed, queue file for creation
if err := s.queueDBOperation(ctx, path, func(ctx context.Context) error { if err := s.withTxn(ctx, func(ctx context.Context) error {
if err := s.Repository.Create(ctx, file); err != nil { if err := s.Repository.Create(ctx, file); err != nil {
return fmt.Errorf("creating file %q: %w", path, err) return fmt.Errorf("creating file %q: %w", path, err)
} }
@@ -655,17 +642,6 @@ func (s *scanJob) onNewFile(ctx context.Context, f scanFile) (File, error) {
return file, nil return file, nil
} }
func (s *scanJob) queueDBOperation(ctx context.Context, path string, fn func(ctx context.Context) error) error {
// perform immediately if it is a zip file
if s.isZipFile(path) {
return s.withTxn(ctx, fn)
}
s.dbQueue <- fn
return nil
}
func (s *scanJob) fireDecorators(ctx context.Context, fs FS, f File) (File, error) { func (s *scanJob) fireDecorators(ctx context.Context, fs FS, f File) (File, error) {
for _, h := range s.FileDecorators { for _, h := range s.FileDecorators {
var err error var err error
@@ -782,7 +758,7 @@ func (s *scanJob) handleRename(ctx context.Context, f File, fp []Fingerprint) (F
fBase.CreatedAt = otherBase.CreatedAt fBase.CreatedAt = otherBase.CreatedAt
fBase.Fingerprints = otherBase.Fingerprints fBase.Fingerprints = otherBase.Fingerprints
if err := s.queueDBOperation(ctx, fBase.Path, func(ctx context.Context) error { if err := s.withTxn(ctx, func(ctx context.Context) error {
if err := s.Repository.Update(ctx, f); err != nil { if err := s.Repository.Update(ctx, f); err != nil {
return fmt.Errorf("updating file for rename %q: %w", fBase.Path, err) return fmt.Errorf("updating file for rename %q: %w", fBase.Path, err)
} }
@@ -831,7 +807,7 @@ func (s *scanJob) onExistingFile(ctx context.Context, f scanFile, existing File)
return nil, nil return nil, nil
} }
if err := s.queueDBOperation(ctx, path, func(ctx context.Context) error { if err := s.withTxn(ctx, func(ctx context.Context) error {
if err := s.fireHandlers(ctx, existing); err != nil { if err := s.fireHandlers(ctx, existing); err != nil {
return err return err
} }
@@ -866,7 +842,7 @@ func (s *scanJob) onExistingFile(ctx context.Context, f scanFile, existing File)
} }
// queue file for update // queue file for update
if err := s.queueDBOperation(ctx, path, func(ctx context.Context) error { if err := s.withTxn(ctx, func(ctx context.Context) error {
if err := s.Repository.Update(ctx, existing); err != nil { if err := s.Repository.Update(ctx, existing); err != nil {
return fmt.Errorf("updating file %q: %w", path, err) return fmt.Errorf("updating file %q: %w", path, err)
} }

View File

@@ -31,6 +31,10 @@ func (*TxnManager) AddPostCommitHook(ctx context.Context, hook txn.TxnFunc) {
func (*TxnManager) AddPostRollbackHook(ctx context.Context, hook txn.TxnFunc) { func (*TxnManager) AddPostRollbackHook(ctx context.Context, hook txn.TxnFunc) {
} }
func (*TxnManager) IsLocked(err error) bool {
return false
}
func (*TxnManager) Reset() error { func (*TxnManager) Reset() error {
return nil return nil
} }

View File

@@ -75,13 +75,18 @@ type Database struct {
} }
func NewDatabase() *Database { func NewDatabase() *Database {
return &Database{ fileStore := NewFileStore()
File: NewFileStore(), folderStore := NewFolderStore()
Folder: NewFolderStore(),
Image: NewImageStore(), ret := &Database{
Gallery: NewGalleryStore(), File: fileStore,
Scene: NewSceneStore(), Folder: folderStore,
Scene: NewSceneStore(fileStore),
Image: NewImageStore(fileStore),
Gallery: NewGalleryStore(fileStore, folderStore),
} }
return ret
} }
// Ready returns an error if the database is not ready to begin transactions. // Ready returns an error if the database is not ready to begin transactions.

View File

@@ -250,12 +250,6 @@ func (r *fileQueryRow) appendRelationships(i *file.BaseFile) {
} }
} }
func mergeFiles(dest file.File, src file.File) {
if src.Base().Fingerprints != nil {
dest.Base().Fingerprints = appendFingerprintsUnique(dest.Base().Fingerprints, src.Base().Fingerprints...)
}
}
type fileQueryRows []fileQueryRow type fileQueryRows []fileQueryRow
func (r fileQueryRows) resolve() []file.File { func (r fileQueryRows) resolve() []file.File {
@@ -279,11 +273,6 @@ func (r fileQueryRows) resolve() []file.File {
return ret return ret
} }
type relatedFileQueryRow struct {
fileQueryRow
Primary null.Bool `db:"primary"`
}
type FileStore struct { type FileStore struct {
repository repository

View File

@@ -59,6 +59,23 @@ func (r *galleryRow) fromGallery(o models.Gallery) {
r.UpdatedAt = o.UpdatedAt r.UpdatedAt = o.UpdatedAt
} }
func (r *galleryRow) resolve() *models.Gallery {
return &models.Gallery{
ID: r.ID,
Title: r.Title.String,
URL: r.URL.String,
Date: r.Date.DatePtr(),
Details: r.Details.String,
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
StudioID: nullIntPtr(r.StudioID),
FolderID: nullIntFolderIDPtr(r.FolderID),
// FolderPath: r.FolderPath.String,
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
}
type galleryRowRecord struct { type galleryRowRecord struct {
updateRecord updateRecord
} }
@@ -75,113 +92,24 @@ func (r *galleryRowRecord) fromPartial(o models.GalleryPartial) {
r.setTime("updated_at", o.UpdatedAt) r.setTime("updated_at", o.UpdatedAt)
} }
type galleryQueryRow struct {
galleryRow
relatedFileQueryRow
FolderPath null.String `db:"folder_path"`
SceneID null.Int `db:"scene_id"`
TagID null.Int `db:"tag_id"`
PerformerID null.Int `db:"performer_id"`
}
func (r *galleryQueryRow) resolve() *models.Gallery {
ret := &models.Gallery{
ID: r.ID,
Title: r.Title.String,
URL: r.URL.String,
Date: r.Date.DatePtr(),
Details: r.Details.String,
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
StudioID: nullIntPtr(r.StudioID),
FolderID: nullIntFolderIDPtr(r.FolderID),
FolderPath: r.FolderPath.String,
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
r.appendRelationships(ret)
return ret
}
func appendFileUnique(vs []file.File, toAdd file.File, isPrimary bool) []file.File {
// check in reverse, since it's most likely to be the last one
for i := len(vs) - 1; i >= 0; i-- {
if vs[i].Base().ID == toAdd.Base().ID {
// merge the two
mergeFiles(vs[i], toAdd)
return vs
}
}
if !isPrimary {
return append(vs, toAdd)
}
// primary should be first
return append([]file.File{toAdd}, vs...)
}
func (r *galleryQueryRow) appendRelationships(i *models.Gallery) {
if r.TagID.Valid {
i.TagIDs = intslice.IntAppendUnique(i.TagIDs, int(r.TagID.Int64))
}
if r.PerformerID.Valid {
i.PerformerIDs = intslice.IntAppendUnique(i.PerformerIDs, int(r.PerformerID.Int64))
}
if r.SceneID.Valid {
i.SceneIDs = intslice.IntAppendUnique(i.SceneIDs, int(r.SceneID.Int64))
}
if r.relatedFileQueryRow.FileID.Valid {
f := r.fileQueryRow.resolve()
i.Files = appendFileUnique(i.Files, f, r.Primary.Bool)
}
}
type galleryQueryRows []galleryQueryRow
func (r galleryQueryRows) resolve() []*models.Gallery {
var ret []*models.Gallery
var last *models.Gallery
var lastID int
for _, row := range r {
if last == nil || lastID != row.ID {
f := row.resolve()
last = f
lastID = row.ID
ret = append(ret, last)
continue
}
// must be merging with previous row
row.appendRelationships(last)
}
return ret
}
type GalleryStore struct { type GalleryStore struct {
repository repository
tableMgr *table tableMgr *table
queryTableMgr *table
fileStore *FileStore
folderStore *FolderStore
} }
func NewGalleryStore() *GalleryStore { func NewGalleryStore(fileStore *FileStore, folderStore *FolderStore) *GalleryStore {
return &GalleryStore{ return &GalleryStore{
repository: repository{ repository: repository{
tableName: galleryTable, tableName: galleryTable,
idColumn: idColumn, idColumn: idColumn,
}, },
tableMgr: galleryTableMgr, tableMgr: galleryTableMgr,
queryTableMgr: galleryQueryTableMgr, fileStore: fileStore,
folderStore: folderStore,
} }
} }
@@ -189,10 +117,6 @@ func (qb *GalleryStore) table() exp.IdentifierExpression {
return qb.tableMgr.table return qb.tableMgr.table
} }
func (qb *GalleryStore) queryTable() exp.IdentifierExpression {
return qb.queryTableMgr.table
}
func (qb *GalleryStore) Create(ctx context.Context, newObject *models.Gallery, fileIDs []file.ID) error { func (qb *GalleryStore) Create(ctx context.Context, newObject *models.Gallery, fileIDs []file.ID) error {
var r galleryRow var r galleryRow
r.fromGallery(*newObject) r.fromGallery(*newObject)
@@ -298,7 +222,7 @@ func (qb *GalleryStore) Destroy(ctx context.Context, id int) error {
} }
func (qb *GalleryStore) selectDataset() *goqu.SelectDataset { func (qb *GalleryStore) selectDataset() *goqu.SelectDataset {
return dialect.From(galleriesQueryTable).Select(galleriesQueryTable.All()) return dialect.From(qb.table()).Select(qb.table().All())
} }
func (qb *GalleryStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Gallery, error) { func (qb *GalleryStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Gallery, error) {
@@ -316,24 +240,88 @@ func (qb *GalleryStore) get(ctx context.Context, q *goqu.SelectDataset) (*models
func (qb *GalleryStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Gallery, error) { func (qb *GalleryStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Gallery, error) {
const single = false const single = false
var rows galleryQueryRows var ret []*models.Gallery
if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error {
var f galleryQueryRow var f galleryRow
if err := r.StructScan(&f); err != nil { if err := r.StructScan(&f); err != nil {
return err return err
} }
rows = append(rows, f) s := f.resolve()
if err := qb.resolveRelationships(ctx, s); err != nil {
return err
}
ret = append(ret, s)
return nil return nil
}); err != nil { }); err != nil {
return nil, err return nil, err
} }
return rows.resolve(), nil return ret, nil
}
func (qb *GalleryStore) resolveRelationships(ctx context.Context, s *models.Gallery) error {
var err error
// files
s.Files, err = qb.getFiles(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving gallery files: %w", err)
}
// folder
if s.FolderID != nil {
folder, err := qb.folderStore.Find(ctx, *s.FolderID)
if err != nil {
return fmt.Errorf("resolving gallery folder: %w", err)
}
s.FolderPath = folder.Path
}
// performers
s.PerformerIDs, err = qb.performersRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving gallery performers: %w", err)
}
// tags
s.TagIDs, err = qb.tagsRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving gallery tags: %w", err)
}
// scenes
s.SceneIDs, err = qb.scenesRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving gallery scenes: %w", err)
}
return nil
}
func (qb *GalleryStore) getFiles(ctx context.Context, id int) ([]file.File, error) {
fileIDs, err := qb.filesRepository().get(ctx, id)
if err != nil {
return nil, err
}
// use fileStore to load files
files, err := qb.fileStore.Find(ctx, fileIDs...)
if err != nil {
return nil, err
}
ret := make([]file.File, len(files))
copy(ret, files)
return ret, nil
} }
func (qb *GalleryStore) Find(ctx context.Context, id int) (*models.Gallery, error) { func (qb *GalleryStore) Find(ctx context.Context, id int) (*models.Gallery, error) {
q := qb.selectDataset().Where(qb.queryTableMgr.byID(id)) q := qb.selectDataset().Where(qb.tableMgr.byID(id))
ret, err := qb.get(ctx, q) ret, err := qb.get(ctx, q)
if err != nil { if err != nil {
@@ -344,25 +332,30 @@ func (qb *GalleryStore) Find(ctx context.Context, id int) (*models.Gallery, erro
} }
func (qb *GalleryStore) FindMany(ctx context.Context, ids []int) ([]*models.Gallery, error) { func (qb *GalleryStore) FindMany(ctx context.Context, ids []int) ([]*models.Gallery, error) {
var galleries []*models.Gallery q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(ids))
for _, id := range ids { unsorted, err := qb.getMany(ctx, q)
gallery, err := qb.Find(ctx, id) if err != nil {
if err != nil { return nil, err
return nil, err }
}
if gallery == nil { galleries := make([]*models.Gallery, len(ids))
return nil, fmt.Errorf("gallery with id %d not found", id)
}
galleries = append(galleries, gallery) for _, s := range unsorted {
i := intslice.IntIndex(ids, s.ID)
galleries[i] = s
}
for i := range galleries {
if galleries[i] == nil {
return nil, fmt.Errorf("gallery with id %d not found", ids[i])
}
} }
return galleries, nil return galleries, nil
} }
func (qb *GalleryStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Gallery, error) { func (qb *GalleryStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Gallery, error) {
table := qb.queryTable() table := qb.table()
q := qb.selectDataset().Prepared(true).Where( q := qb.selectDataset().Prepared(true).Where(
table.Col(idColumn).Eq( table.Col(idColumn).Eq(
@@ -986,6 +979,16 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F
} }
} }
func (qb *GalleryStore) filesRepository() *filesRepository {
return &filesRepository{
repository: repository{
tx: qb.tx,
tableName: galleriesFilesTable,
idColumn: galleryIDColumn,
},
}
}
func (qb *GalleryStore) performersRepository() *joinRepository { func (qb *GalleryStore) performersRepository() *joinRepository {
return &joinRepository{ return &joinRepository{
repository: repository{ repository: repository{
@@ -1027,3 +1030,14 @@ func (qb *GalleryStore) UpdateImages(ctx context.Context, galleryID int, imageID
// Delete the existing joins and then create new ones // Delete the existing joins and then create new ones
return qb.imagesRepository().replace(ctx, galleryID, imageIDs) return qb.imagesRepository().replace(ctx, galleryID, imageIDs)
} }
func (qb *GalleryStore) scenesRepository() *joinRepository {
return &joinRepository{
repository: repository{
tx: qb.tx,
tableName: galleriesScenesTable,
idColumn: galleryIDColumn,
},
fkColumn: sceneIDColumn,
}
}

View File

@@ -51,6 +51,7 @@ func Test_galleryQueryBuilder_Create(t *testing.T) {
SceneIDs: []int{sceneIDs[sceneIdx1WithPerformer], sceneIDs[sceneIdx1WithStudio]}, SceneIDs: []int{sceneIDs[sceneIdx1WithPerformer], sceneIDs[sceneIdx1WithStudio]},
TagIDs: []int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]}, TagIDs: []int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]},
PerformerIDs: []int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]}, PerformerIDs: []int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]},
Files: []file.File{},
}, },
false, false,
}, },
@@ -202,9 +203,12 @@ func Test_galleryQueryBuilder_Update(t *testing.T) {
Files: []file.File{ Files: []file.File{
makeGalleryFileWithID(galleryIdxWithImage), makeGalleryFileWithID(galleryIdxWithImage),
}, },
Organized: true, SceneIDs: []int{},
CreatedAt: createdAt, TagIDs: []int{},
UpdatedAt: updatedAt, PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
}, },
false, false,
}, },
@@ -215,9 +219,12 @@ func Test_galleryQueryBuilder_Update(t *testing.T) {
Files: []file.File{ Files: []file.File{
makeGalleryFileWithID(galleryIdxWithScene), makeGalleryFileWithID(galleryIdxWithScene),
}, },
Organized: true, SceneIDs: []int{},
CreatedAt: createdAt, TagIDs: []int{},
UpdatedAt: updatedAt, PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
}, },
false, false,
}, },
@@ -228,9 +235,12 @@ func Test_galleryQueryBuilder_Update(t *testing.T) {
Files: []file.File{ Files: []file.File{
makeGalleryFileWithID(galleryIdxWithTag), makeGalleryFileWithID(galleryIdxWithTag),
}, },
Organized: true, SceneIDs: []int{},
CreatedAt: createdAt, TagIDs: []int{},
UpdatedAt: updatedAt, PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
}, },
false, false,
}, },
@@ -241,9 +251,12 @@ func Test_galleryQueryBuilder_Update(t *testing.T) {
Files: []file.File{ Files: []file.File{
makeGalleryFileWithID(galleryIdxWithPerformer), makeGalleryFileWithID(galleryIdxWithPerformer),
}, },
Organized: true, SceneIDs: []int{},
CreatedAt: createdAt, TagIDs: []int{},
UpdatedAt: updatedAt, PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
}, },
false, false,
}, },
@@ -428,6 +441,9 @@ func Test_galleryQueryBuilder_UpdatePartial(t *testing.T) {
Files: []file.File{ Files: []file.File{
makeGalleryFile(galleryIdxWithImage), makeGalleryFile(galleryIdxWithImage),
}, },
SceneIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
}, },
false, false,
}, },
@@ -621,7 +637,9 @@ func Test_galleryQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove, Mode: models.RelationshipUpdateModeRemove,
}, },
}, },
models.Gallery{}, models.Gallery{
SceneIDs: []int{},
},
false, false,
}, },
{ {

View File

@@ -48,6 +48,19 @@ func (r *imageRow) fromImage(i models.Image) {
r.UpdatedAt = i.UpdatedAt r.UpdatedAt = i.UpdatedAt
} }
func (r *imageRow) resolve() *models.Image {
return &models.Image{
ID: r.ID,
Title: r.Title.String,
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
OCounter: r.OCounter,
StudioID: nullIntPtr(r.StudioID),
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
}
type imageRowRecord struct { type imageRowRecord struct {
updateRecord updateRecord
} }
@@ -62,109 +75,24 @@ func (r *imageRowRecord) fromPartial(i models.ImagePartial) {
r.setTime("updated_at", i.UpdatedAt) r.setTime("updated_at", i.UpdatedAt)
} }
type imageQueryRow struct {
imageRow
relatedFileQueryRow
GalleryID null.Int `db:"gallery_id"`
TagID null.Int `db:"tag_id"`
PerformerID null.Int `db:"performer_id"`
}
func (r *imageQueryRow) resolve() *models.Image {
ret := &models.Image{
ID: r.ID,
Title: r.Title.String,
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
OCounter: r.OCounter,
StudioID: nullIntPtr(r.StudioID),
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
r.appendRelationships(ret)
return ret
}
func appendImageFileUnique(vs []*file.ImageFile, toAdd *file.ImageFile, isPrimary bool) []*file.ImageFile {
// check in reverse, since it's most likely to be the last one
for i := len(vs) - 1; i >= 0; i-- {
if vs[i].Base().ID == toAdd.Base().ID {
// merge the two
mergeFiles(vs[i], toAdd)
return vs
}
}
if !isPrimary {
return append(vs, toAdd)
}
// primary should be first
return append([]*file.ImageFile{toAdd}, vs...)
}
func (r *imageQueryRow) appendRelationships(i *models.Image) {
if r.GalleryID.Valid {
i.GalleryIDs = intslice.IntAppendUnique(i.GalleryIDs, int(r.GalleryID.Int64))
}
if r.TagID.Valid {
i.TagIDs = intslice.IntAppendUnique(i.TagIDs, int(r.TagID.Int64))
}
if r.PerformerID.Valid {
i.PerformerIDs = intslice.IntAppendUnique(i.PerformerIDs, int(r.PerformerID.Int64))
}
if r.relatedFileQueryRow.FileID.Valid {
f := r.fileQueryRow.resolve().(*file.ImageFile)
i.Files = appendImageFileUnique(i.Files, f, r.Primary.Bool)
}
}
type imageQueryRows []imageQueryRow
func (r imageQueryRows) resolve() []*models.Image {
var ret []*models.Image
var last *models.Image
var lastID int
for _, row := range r {
if last == nil || lastID != row.ID {
f := row.resolve()
last = f
lastID = row.ID
ret = append(ret, last)
continue
}
// must be merging with previous row
row.appendRelationships(last)
}
return ret
}
type ImageStore struct { type ImageStore struct {
repository repository
tableMgr *table tableMgr *table
queryTableMgr *table
oCounterManager oCounterManager
fileStore *FileStore
} }
func NewImageStore() *ImageStore { func NewImageStore(fileStore *FileStore) *ImageStore {
return &ImageStore{ return &ImageStore{
repository: repository{ repository: repository{
tableName: imageTable, tableName: imageTable,
idColumn: idColumn, idColumn: idColumn,
}, },
tableMgr: imageTableMgr, tableMgr: imageTableMgr,
queryTableMgr: imageQueryTableMgr,
oCounterManager: oCounterManager{imageTableMgr}, oCounterManager: oCounterManager{imageTableMgr},
fileStore: fileStore,
} }
} }
@@ -172,10 +100,6 @@ func (qb *ImageStore) table() exp.IdentifierExpression {
return qb.tableMgr.table return qb.tableMgr.table
} }
func (qb *ImageStore) queryTable() exp.IdentifierExpression {
return qb.queryTableMgr.table
}
func (qb *ImageStore) Create(ctx context.Context, newObject *models.ImageCreateInput) error { func (qb *ImageStore) Create(ctx context.Context, newObject *models.ImageCreateInput) error {
var r imageRow var r imageRow
r.fromImage(*newObject.Image) r.fromImage(*newObject.Image)
@@ -291,21 +215,30 @@ func (qb *ImageStore) Find(ctx context.Context, id int) (*models.Image, error) {
} }
func (qb *ImageStore) FindMany(ctx context.Context, ids []int) ([]*models.Image, error) { func (qb *ImageStore) FindMany(ctx context.Context, ids []int) ([]*models.Image, error) {
var images []*models.Image q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(ids))
for _, id := range ids { unsorted, err := qb.getMany(ctx, q)
image, err := qb.Find(ctx, id) if err != nil {
if err != nil { return nil, err
return nil, err }
}
images = append(images, image) images := make([]*models.Image, len(ids))
for _, s := range unsorted {
i := intslice.IntIndex(ids, s.ID)
images[i] = s
}
for i := range images {
if images[i] == nil {
return nil, fmt.Errorf("image with id %d not found", ids[i])
}
} }
return images, nil return images, nil
} }
func (qb *ImageStore) selectDataset() *goqu.SelectDataset { func (qb *ImageStore) selectDataset() *goqu.SelectDataset {
return dialect.From(imagesQueryTable).Select(imagesQueryTable.All()) return dialect.From(qb.table()).Select(qb.table().All())
} }
func (qb *ImageStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Image, error) { func (qb *ImageStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Image, error) {
@@ -323,24 +256,84 @@ func (qb *ImageStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.I
func (qb *ImageStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Image, error) { func (qb *ImageStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Image, error) {
const single = false const single = false
var rows imageQueryRows var ret []*models.Image
if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error {
var f imageQueryRow var f imageRow
if err := r.StructScan(&f); err != nil { if err := r.StructScan(&f); err != nil {
return err return err
} }
rows = append(rows, f) i := f.resolve()
if err := qb.resolveRelationships(ctx, i); err != nil {
return err
}
ret = append(ret, i)
return nil return nil
}); err != nil { }); err != nil {
return nil, err return nil, err
} }
return rows.resolve(), nil return ret, nil
}
func (qb *ImageStore) resolveRelationships(ctx context.Context, i *models.Image) error {
var err error
// files
i.Files, err = qb.getFiles(ctx, i.ID)
if err != nil {
return fmt.Errorf("resolving image files: %w", err)
}
// performers
i.PerformerIDs, err = qb.performersRepository().getIDs(ctx, i.ID)
if err != nil {
return fmt.Errorf("resolving image performers: %w", err)
}
// tags
i.TagIDs, err = qb.tagsRepository().getIDs(ctx, i.ID)
if err != nil {
return fmt.Errorf("resolving image tags: %w", err)
}
// galleries
i.GalleryIDs, err = qb.galleriesRepository().getIDs(ctx, i.ID)
if err != nil {
return fmt.Errorf("resolving image galleries: %w", err)
}
return nil
}
func (qb *ImageStore) getFiles(ctx context.Context, id int) ([]*file.ImageFile, error) {
fileIDs, err := qb.filesRepository().get(ctx, id)
if err != nil {
return nil, err
}
// use fileStore to load files
files, err := qb.fileStore.Find(ctx, fileIDs...)
if err != nil {
return nil, err
}
ret := make([]*file.ImageFile, len(files))
for i, f := range files {
var ok bool
ret[i], ok = f.(*file.ImageFile)
if !ok {
return nil, fmt.Errorf("expected file to be *file.ImageFile not %T", f)
}
}
return ret, nil
} }
func (qb *ImageStore) find(ctx context.Context, id int) (*models.Image, error) { func (qb *ImageStore) find(ctx context.Context, id int) (*models.Image, error) {
q := qb.selectDataset().Where(qb.queryTableMgr.byID(id)) q := qb.selectDataset().Where(qb.tableMgr.byID(id))
ret, err := qb.get(ctx, q) ret, err := qb.get(ctx, q)
if err != nil { if err != nil {
@@ -351,7 +344,7 @@ func (qb *ImageStore) find(ctx context.Context, id int) (*models.Image, error) {
} }
func (qb *ImageStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Image, error) { func (qb *ImageStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Image, error) {
table := qb.queryTable() table := qb.table()
q := qb.selectDataset().Prepared(true).Where( q := qb.selectDataset().Prepared(true).Where(
table.Col(idColumn).Eq( table.Col(idColumn).Eq(
@@ -430,7 +423,8 @@ func (qb *ImageStore) FindByChecksum(ctx context.Context, checksum string) ([]*m
func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Image, error) { func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Image, error) {
table := qb.table() table := qb.table()
queryTable := qb.queryTable() fileTable := fileTableMgr.table
folderTable := folderTableMgr.table
sq := dialect.From(table). sq := dialect.From(table).
InnerJoin( InnerJoin(
@@ -441,11 +435,20 @@ func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*mo
galleriesImagesJoinTable.Col("gallery_id").Eq(galleryID), galleriesImagesJoinTable.Col("gallery_id").Eq(galleryID),
) )
q := qb.selectDataset().Prepared(true).Where( q := qb.selectDataset().Prepared(true).LeftJoin(
queryTable.Col(idColumn).Eq( imagesFilesJoinTable,
goqu.On(imagesFilesJoinTable.Col(imageIDColumn).Eq(table.Col(idColumn))),
).LeftJoin(
fileTable,
goqu.On(fileTable.Col(idColumn).Eq(imagesFilesJoinTable.Col(fileIDColumn))),
).LeftJoin(
folderTable,
goqu.On(folderTable.Col(idColumn).Eq(fileTable.Col("parent_folder_id"))),
).Where(
table.Col(idColumn).Eq(
sq, sq,
), ),
).Order(queryTable.Col("parent_folder_path").Asc(), queryTable.Col("basename").Asc()) ).Order(folderTable.Col("path").Asc(), fileTable.Col("basename").Asc())
ret, err := qb.getMany(ctx, q) ret, err := qb.getMany(ctx, q)
if err != nil { if err != nil {
@@ -969,7 +972,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod
sortClause = getCountSort(imageTable, imagesTagsTable, imageIDColumn, direction) sortClause = getCountSort(imageTable, imagesTagsTable, imageIDColumn, direction)
case "performer_count": case "performer_count":
sortClause = getCountSort(imageTable, performersImagesTable, imageIDColumn, direction) sortClause = getCountSort(imageTable, performersImagesTable, imageIDColumn, direction)
case "mod_time", "size": case "mod_time", "filesize":
addFilesJoin() addFilesJoin()
sortClause = getSort(sort, direction, "files") sortClause = getSort(sort, direction, "files")
default: default:
@@ -991,6 +994,16 @@ func (qb *ImageStore) galleriesRepository() *joinRepository {
} }
} }
func (qb *ImageStore) filesRepository() *filesRepository {
return &filesRepository{
repository: repository{
tx: qb.tx,
tableName: imagesFilesTable,
idColumn: imageIDColumn,
},
}
}
// func (qb *imageQueryBuilder) GetGalleryIDs(ctx context.Context, imageID int) ([]int, error) { // func (qb *imageQueryBuilder) GetGalleryIDs(ctx context.Context, imageID int) ([]int, error) {
// return qb.galleriesRepository().getIDs(ctx, imageID) // return qb.galleriesRepository().getIDs(ctx, imageID)
// } // }

View File

@@ -44,6 +44,7 @@ func Test_imageQueryBuilder_Create(t *testing.T) {
GalleryIDs: []int{galleryIDs[galleryIdxWithImage]}, GalleryIDs: []int{galleryIDs[galleryIdxWithImage]},
TagIDs: []int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithDupName]}, TagIDs: []int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithDupName]},
PerformerIDs: []int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithDupName]}, PerformerIDs: []int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithDupName]},
Files: []*file.ImageFile{},
}, },
false, false,
}, },
@@ -193,9 +194,12 @@ func Test_imageQueryBuilder_Update(t *testing.T) {
Files: []*file.ImageFile{ Files: []*file.ImageFile{
makeImageFileWithID(imageIdxWithGallery), makeImageFileWithID(imageIdxWithGallery),
}, },
Organized: true, GalleryIDs: []int{},
CreatedAt: createdAt, TagIDs: []int{},
UpdatedAt: updatedAt, PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
}, },
false, false,
}, },
@@ -206,9 +210,12 @@ func Test_imageQueryBuilder_Update(t *testing.T) {
Files: []*file.ImageFile{ Files: []*file.ImageFile{
makeImageFileWithID(imageIdxWithGallery), makeImageFileWithID(imageIdxWithGallery),
}, },
Organized: true, GalleryIDs: []int{},
CreatedAt: createdAt, TagIDs: []int{},
UpdatedAt: updatedAt, PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
}, },
false, false,
}, },
@@ -219,9 +226,12 @@ func Test_imageQueryBuilder_Update(t *testing.T) {
Files: []*file.ImageFile{ Files: []*file.ImageFile{
makeImageFileWithID(imageIdxWithTag), makeImageFileWithID(imageIdxWithTag),
}, },
Organized: true, GalleryIDs: []int{},
CreatedAt: createdAt, TagIDs: []int{},
UpdatedAt: updatedAt, PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
}, },
false, false,
}, },
@@ -232,9 +242,12 @@ func Test_imageQueryBuilder_Update(t *testing.T) {
Files: []*file.ImageFile{ Files: []*file.ImageFile{
makeImageFileWithID(imageIdxWithPerformer), makeImageFileWithID(imageIdxWithPerformer),
}, },
Organized: true, GalleryIDs: []int{},
CreatedAt: createdAt, TagIDs: []int{},
UpdatedAt: updatedAt, PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
}, },
false, false,
}, },
@@ -403,6 +416,9 @@ func Test_imageQueryBuilder_UpdatePartial(t *testing.T) {
Files: []*file.ImageFile{ Files: []*file.ImageFile{
makeImageFile(imageIdx1WithGallery), makeImageFile(imageIdx1WithGallery),
}, },
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
}, },
false, false,
}, },
@@ -596,7 +612,9 @@ func Test_imageQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove, Mode: models.RelationshipUpdateModeRemove,
}, },
}, },
models.Image{}, models.Image{
GalleryIDs: []int{},
},
false, false,
}, },
{ {
@@ -2398,7 +2416,7 @@ func TestImageQuerySorting(t *testing.T) {
}, },
{ {
"file size", "file size",
"size", "filesize",
models.SortDirectionEnumDesc, models.SortDirectionEnumDesc,
-1, -1,
-1, -1,

View File

@@ -539,139 +539,3 @@ ALTER TABLE `scenes_new` rename to `scenes`;
CREATE INDEX `index_scenes_on_studio_id` on `scenes` (`studio_id`); CREATE INDEX `index_scenes_on_studio_id` on `scenes` (`studio_id`);
PRAGMA foreign_keys=ON; PRAGMA foreign_keys=ON;
-- create views to simplify queries
CREATE VIEW `images_query` AS
SELECT
`images`.`id`,
`images`.`title`,
`images`.`rating`,
`images`.`organized`,
`images`.`o_counter`,
`images`.`studio_id`,
`images`.`created_at`,
`images`.`updated_at`,
`galleries_images`.`gallery_id`,
`images_tags`.`tag_id`,
`performers_images`.`performer_id`,
`image_files`.`format` as `image_format`,
`image_files`.`width` as `image_width`,
`image_files`.`height` as `image_height`,
`files`.`id` as `file_id`,
`files`.`basename`,
`files`.`size`,
`files`.`mod_time`,
`files`.`zip_file_id`,
`folders`.`id` as `parent_folder_id`,
`folders`.`path` as `parent_folder_path`,
`zip_files`.`basename` as `zip_basename`,
`zip_files_folders`.`path` as `zip_folder_path`,
`files_fingerprints`.`type` as `fingerprint_type`,
`files_fingerprints`.`fingerprint`
FROM `images`
LEFT JOIN `performers_images` ON (`images`.`id` = `performers_images`.`image_id`)
LEFT JOIN `galleries_images` ON (`images`.`id` = `galleries_images`.`image_id`)
LEFT JOIN `images_tags` ON (`images`.`id` = `images_tags`.`image_id`)
LEFT JOIN `images_files` ON (`images`.`id` = `images_files`.`image_id`)
LEFT JOIN `image_files` ON (`images_files`.`file_id` = `image_files`.`file_id`)
LEFT JOIN `files` ON (`images_files`.`file_id` = `files`.`id`)
LEFT JOIN `folders` ON (`files`.`parent_folder_id` = `folders`.`id`)
LEFT JOIN `files` AS `zip_files` ON (`files`.`zip_file_id` = `zip_files`.`id`)
LEFT JOIN `folders` AS `zip_files_folders` ON (`zip_files`.`parent_folder_id` = `zip_files_folders`.`id`)
LEFT JOIN `files_fingerprints` ON (`images_files`.`file_id` = `files_fingerprints`.`file_id`);
CREATE VIEW `galleries_query` AS
SELECT
`galleries`.`id`,
`galleries`.`title`,
`galleries`.`url`,
`galleries`.`date`,
`galleries`.`details`,
`galleries`.`rating`,
`galleries`.`organized`,
`galleries`.`studio_id`,
`galleries`.`created_at`,
`galleries`.`updated_at`,
`galleries_tags`.`tag_id`,
`scenes_galleries`.`scene_id`,
`performers_galleries`.`performer_id`,
`galleries_folders`.`id` as `folder_id`,
`galleries_folders`.`path` as `folder_path`,
`files`.`id` as `file_id`,
`files`.`basename`,
`files`.`size`,
`files`.`mod_time`,
`files`.`zip_file_id`,
`parent_folders`.`id` as `parent_folder_id`,
`parent_folders`.`path` as `parent_folder_path`,
`zip_files`.`basename` as `zip_basename`,
`zip_files_folders`.`path` as `zip_folder_path`,
`files_fingerprints`.`type` as `fingerprint_type`,
`files_fingerprints`.`fingerprint`
FROM `galleries`
LEFT JOIN `performers_galleries` ON (`galleries`.`id` = `performers_galleries`.`gallery_id`)
LEFT JOIN `galleries_tags` ON (`galleries`.`id` = `galleries_tags`.`gallery_id`)
LEFT JOIN `scenes_galleries` ON (`galleries`.`id` = `scenes_galleries`.`gallery_id`)
LEFT JOIN `folders` AS `galleries_folders` ON (`galleries`.`folder_id` = `galleries_folders`.`id`)
LEFT JOIN `galleries_files` ON (`galleries`.`id` = `galleries_files`.`gallery_id`)
LEFT JOIN `files` ON (`galleries_files`.`file_id` = `files`.`id`)
LEFT JOIN `folders` AS `parent_folders` ON (`files`.`parent_folder_id` = `parent_folders`.`id`)
LEFT JOIN `files` AS `zip_files` ON (`files`.`zip_file_id` = `zip_files`.`id`)
LEFT JOIN `folders` AS `zip_files_folders` ON (`zip_files`.`parent_folder_id` = `zip_files_folders`.`id`)
LEFT JOIN `files_fingerprints` ON (`galleries_files`.`file_id` = `files_fingerprints`.`file_id`);
CREATE VIEW `scenes_query` AS
SELECT
`scenes`.`id`,
`scenes`.`title`,
`scenes`.`details`,
`scenes`.`url`,
`scenes`.`date`,
`scenes`.`rating`,
`scenes`.`studio_id`,
`scenes`.`o_counter`,
`scenes`.`organized`,
`scenes`.`created_at`,
`scenes`.`updated_at`,
`scenes_tags`.`tag_id`,
`scenes_galleries`.`gallery_id`,
`performers_scenes`.`performer_id`,
`movies_scenes`.`movie_id`,
`movies_scenes`.`scene_index`,
`scene_stash_ids`.`stash_id`,
`scene_stash_ids`.`endpoint`,
`video_files`.`format` as `video_format`,
`video_files`.`width` as `video_width`,
`video_files`.`height` as `video_height`,
`video_files`.`duration`,
`video_files`.`video_codec`,
`video_files`.`audio_codec`,
`video_files`.`frame_rate`,
`video_files`.`bit_rate`,
`video_files`.`interactive`,
`video_files`.`interactive_speed`,
`files`.`id` as `file_id`,
`files`.`basename`,
`files`.`size`,
`files`.`mod_time`,
`files`.`zip_file_id`,
`folders`.`id` as `parent_folder_id`,
`folders`.`path` as `parent_folder_path`,
`zip_files`.`basename` as `zip_basename`,
`zip_files_folders`.`path` as `zip_folder_path`,
`files_fingerprints`.`type` as `fingerprint_type`,
`files_fingerprints`.`fingerprint`
FROM `scenes`
LEFT JOIN `performers_scenes` ON (`scenes`.`id` = `performers_scenes`.`scene_id`)
LEFT JOIN `scenes_tags` ON (`scenes`.`id` = `scenes_tags`.`scene_id`)
LEFT JOIN `movies_scenes` ON (`scenes`.`id` = `movies_scenes`.`scene_id`)
LEFT JOIN `scene_stash_ids` ON (`scenes`.`id` = `scene_stash_ids`.`scene_id`)
LEFT JOIN `scenes_galleries` ON (`scenes`.`id` = `scenes_galleries`.`scene_id`)
LEFT JOIN `scenes_files` ON (`scenes`.`id` = `scenes_files`.`scene_id`)
LEFT JOIN `video_files` ON (`scenes_files`.`file_id` = `video_files`.`file_id`)
LEFT JOIN `files` ON (`scenes_files`.`file_id` = `files`.`id`)
LEFT JOIN `folders` ON (`files`.`parent_folder_id` = `folders`.`id`)
LEFT JOIN `files` AS `zip_files` ON (`files`.`zip_file_id` = `zip_files`.`id`)
LEFT JOIN `folders` AS `zip_files_folders` ON (`zip_files`.`parent_folder_id` = `zip_files_folders`.`id`)
LEFT JOIN `files_fingerprints` ON (`scenes_files`.`file_id` = `files_fingerprints`.`file_id`);

View File

@@ -32,6 +32,11 @@ func (r *repository) getByID(ctx context.Context, id int, dest interface{}) erro
return r.tx.Get(ctx, dest, stmt, id) return r.tx.Get(ctx, dest, stmt, id)
} }
func (r *repository) getAll(ctx context.Context, id int, f func(rows *sqlx.Rows) error) error {
stmt := fmt.Sprintf("SELECT * FROM %s WHERE %s = ?", r.tableName, r.idColumn)
return r.queryFunc(ctx, stmt, []interface{}{id}, false, f)
}
func (r *repository) insert(ctx context.Context, obj interface{}) (sql.Result, error) { func (r *repository) insert(ctx context.Context, obj interface{}) (sql.Result, error) {
stmt := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", r.tableName, listKeys(obj, false), listKeys(obj, true)) stmt := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", r.tableName, listKeys(obj, false), listKeys(obj, true))
return r.tx.NamedExec(ctx, stmt, obj) return r.tx.NamedExec(ctx, stmt, obj)
@@ -468,6 +473,41 @@ func (r *stashIDRepository) replace(ctx context.Context, id int, newIDs []*model
return nil return nil
} }
type filesRepository struct {
repository
}
func (r *filesRepository) get(ctx context.Context, id int) ([]file.ID, error) {
query := fmt.Sprintf("SELECT file_id, `primary` from %s WHERE %s = ?", r.tableName, r.idColumn)
type relatedFile struct {
FileID file.ID `db:"file_id"`
Primary bool `db:"primary"`
}
var ret []file.ID
if err := r.queryFunc(ctx, query, []interface{}{id}, false, func(rows *sqlx.Rows) error {
var f relatedFile
if err := rows.StructScan(&f); err != nil {
return err
}
if f.Primary {
// prepend to list
ret = append([]file.ID{f.FileID}, ret...)
} else {
ret = append(ret, f.FileID)
}
return nil
}); err != nil {
return nil, err
}
return ret, nil
}
func listKeys(i interface{}, addPrefix bool) string { func listKeys(i interface{}, addPrefix bool) string {
var query []string var query []string
v := reflect.ValueOf(i) v := reflect.ValueOf(i)

View File

@@ -82,6 +82,22 @@ func (r *sceneRow) fromScene(o models.Scene) {
r.UpdatedAt = o.UpdatedAt r.UpdatedAt = o.UpdatedAt
} }
func (r *sceneRow) resolve() *models.Scene {
return &models.Scene{
ID: r.ID,
Title: r.Title.String,
Details: r.Details.String,
URL: r.URL.String,
Date: r.Date.DatePtr(),
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
OCounter: r.OCounter,
StudioID: nullIntPtr(r.StudioID),
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
}
type sceneRowRecord struct { type sceneRowRecord struct {
updateRecord updateRecord
} }
@@ -99,139 +115,16 @@ func (r *sceneRowRecord) fromPartial(o models.ScenePartial) {
r.setTime("updated_at", o.UpdatedAt) r.setTime("updated_at", o.UpdatedAt)
} }
type sceneQueryRow struct {
sceneRow
relatedFileQueryRow
GalleryID null.Int `db:"gallery_id"`
TagID null.Int `db:"tag_id"`
PerformerID null.Int `db:"performer_id"`
moviesScenesRow
stashIDRow
}
func (r *sceneQueryRow) resolve() *models.Scene {
ret := &models.Scene{
ID: r.ID,
Title: r.Title.String,
Details: r.Details.String,
URL: r.URL.String,
Date: r.Date.DatePtr(),
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
OCounter: r.OCounter,
StudioID: nullIntPtr(r.StudioID),
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
r.appendRelationships(ret)
return ret
}
func movieAppendUnique(e []models.MoviesScenes, toAdd models.MoviesScenes) []models.MoviesScenes {
for _, ee := range e {
if ee.Equal(toAdd) {
return e
}
}
return append(e, toAdd)
}
func stashIDAppendUnique(e []models.StashID, toAdd models.StashID) []models.StashID {
for _, ee := range e {
if ee == toAdd {
return e
}
}
return append(e, toAdd)
}
func appendVideoFileUnique(vs []*file.VideoFile, toAdd *file.VideoFile, isPrimary bool) []*file.VideoFile {
// check in reverse, since it's most likely to be the last one
for i := len(vs) - 1; i >= 0; i-- {
if vs[i].Base().ID == toAdd.Base().ID {
// merge the two
mergeFiles(vs[i], toAdd)
return vs
}
}
if !isPrimary {
return append(vs, toAdd)
}
// primary should be first
return append([]*file.VideoFile{toAdd}, vs...)
}
func (r *sceneQueryRow) appendRelationships(i *models.Scene) {
if r.TagID.Valid {
i.TagIDs = intslice.IntAppendUnique(i.TagIDs, int(r.TagID.Int64))
}
if r.PerformerID.Valid {
i.PerformerIDs = intslice.IntAppendUnique(i.PerformerIDs, int(r.PerformerID.Int64))
}
if r.GalleryID.Valid {
i.GalleryIDs = intslice.IntAppendUnique(i.GalleryIDs, int(r.GalleryID.Int64))
}
if r.MovieID.Valid {
i.Movies = movieAppendUnique(i.Movies, models.MoviesScenes{
MovieID: int(r.MovieID.Int64),
SceneIndex: nullIntPtr(r.SceneIndex),
})
}
if r.StashID.Valid {
i.StashIDs = stashIDAppendUnique(i.StashIDs, models.StashID{
StashID: r.StashID.String,
Endpoint: r.Endpoint.String,
})
}
if r.relatedFileQueryRow.FileID.Valid {
f := r.fileQueryRow.resolve().(*file.VideoFile)
i.Files = appendVideoFileUnique(i.Files, f, r.Primary.Bool)
}
}
type sceneQueryRows []sceneQueryRow
func (r sceneQueryRows) resolve() []*models.Scene {
var ret []*models.Scene
var last *models.Scene
var lastID int
for _, row := range r {
if last == nil || lastID != row.ID {
f := row.resolve()
last = f
lastID = row.ID
ret = append(ret, last)
continue
}
// must be merging with previous row
row.appendRelationships(last)
}
return ret
}
type SceneStore struct { type SceneStore struct {
repository repository
tableMgr *table tableMgr *table
queryTableMgr *table
oCounterManager oCounterManager
fileStore *FileStore
} }
func NewSceneStore() *SceneStore { func NewSceneStore(fileStore *FileStore) *SceneStore {
return &SceneStore{ return &SceneStore{
repository: repository{ repository: repository{
tableName: sceneTable, tableName: sceneTable,
@@ -239,8 +132,8 @@ func NewSceneStore() *SceneStore {
}, },
tableMgr: sceneTableMgr, tableMgr: sceneTableMgr,
queryTableMgr: sceneQueryTableMgr,
oCounterManager: oCounterManager{sceneTableMgr}, oCounterManager: oCounterManager{sceneTableMgr},
fileStore: fileStore,
} }
} }
@@ -248,10 +141,6 @@ func (qb *SceneStore) table() exp.IdentifierExpression {
return qb.tableMgr.table return qb.tableMgr.table
} }
func (qb *SceneStore) queryTable() exp.IdentifierExpression {
return qb.queryTableMgr.table
}
func (qb *SceneStore) Create(ctx context.Context, newObject *models.Scene, fileIDs []file.ID) error { func (qb *SceneStore) Create(ctx context.Context, newObject *models.Scene, fileIDs []file.ID) error {
var r sceneRow var r sceneRow
r.fromScene(*newObject) r.fromScene(*newObject)
@@ -392,25 +281,32 @@ func (qb *SceneStore) Find(ctx context.Context, id int) (*models.Scene, error) {
} }
func (qb *SceneStore) FindMany(ctx context.Context, ids []int) ([]*models.Scene, error) { func (qb *SceneStore) FindMany(ctx context.Context, ids []int) ([]*models.Scene, error) {
var scenes []*models.Scene table := qb.table()
for _, id := range ids { q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids))
scene, err := qb.Find(ctx, id) unsorted, err := qb.getMany(ctx, q)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if scene == nil { scenes := make([]*models.Scene, len(ids))
return nil, fmt.Errorf("scene with id %d not found", id)
}
scenes = append(scenes, scene) for _, s := range unsorted {
i := intslice.IntIndex(ids, s.ID)
scenes[i] = s
}
for i := range scenes {
if scenes[i] == nil {
return nil, fmt.Errorf("scene with id %d not found", ids[i])
}
} }
return scenes, nil return scenes, nil
} }
func (qb *SceneStore) selectDataset() *goqu.SelectDataset { func (qb *SceneStore) selectDataset() *goqu.SelectDataset {
return dialect.From(scenesQueryTable).Select(scenesQueryTable.All()) table := qb.table()
return dialect.From(table).Select(table.All())
} }
func (qb *SceneStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Scene, error) { func (qb *SceneStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Scene, error) {
@@ -428,24 +324,127 @@ func (qb *SceneStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.S
func (qb *SceneStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Scene, error) { func (qb *SceneStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Scene, error) {
const single = false const single = false
var rows sceneQueryRows var ret []*models.Scene
if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error { if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error {
var f sceneQueryRow var f sceneRow
if err := r.StructScan(&f); err != nil { if err := r.StructScan(&f); err != nil {
return err return err
} }
rows = append(rows, f) s := f.resolve()
if err := qb.resolveRelationships(ctx, s); err != nil {
return err
}
ret = append(ret, s)
return nil return nil
}); err != nil { }); err != nil {
return nil, err return nil, err
} }
return rows.resolve(), nil return ret, nil
}
func (qb *SceneStore) resolveRelationships(ctx context.Context, s *models.Scene) error {
var err error
// files
s.Files, err = qb.getFiles(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene files: %w", err)
}
// movies
s.Movies, err = qb.getMovies(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene movies: %w", err)
}
// performers
s.PerformerIDs, err = qb.performersRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene performers: %w", err)
}
// tags
s.TagIDs, err = qb.tagsRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene tags: %w", err)
}
// galleries
s.GalleryIDs, err = qb.galleriesRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene galleries: %w", err)
}
// stash ids
s.StashIDs, err = qb.getStashIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene stash ids: %w", err)
}
return nil
}
func (qb *SceneStore) getFiles(ctx context.Context, id int) ([]*file.VideoFile, error) {
fileIDs, err := qb.filesRepository().get(ctx, id)
if err != nil {
return nil, err
}
// use fileStore to load files
files, err := qb.fileStore.Find(ctx, fileIDs...)
if err != nil {
return nil, err
}
ret := make([]*file.VideoFile, len(files))
for i, f := range files {
var ok bool
ret[i], ok = f.(*file.VideoFile)
if !ok {
return nil, fmt.Errorf("expected file to be *file.VideoFile not %T", f)
}
}
return ret, nil
}
func (qb *SceneStore) getMovies(ctx context.Context, id int) (ret []models.MoviesScenes, err error) {
ret = []models.MoviesScenes{}
if err := qb.moviesRepository().getAll(ctx, id, func(rows *sqlx.Rows) error {
var ms moviesScenesRow
if err := rows.StructScan(&ms); err != nil {
return err
}
ret = append(ret, ms.resolve(id))
return nil
}); err != nil {
return nil, err
}
return ret, nil
}
func (qb *SceneStore) getStashIDs(ctx context.Context, id int) ([]models.StashID, error) {
stashIDs, err := qb.stashIDRepository().get(ctx, id)
if err != nil {
return nil, err
}
ret := make([]models.StashID, len(stashIDs))
for i, sid := range stashIDs {
ret[i] = *sid
}
return ret, nil
} }
func (qb *SceneStore) find(ctx context.Context, id int) (*models.Scene, error) { func (qb *SceneStore) find(ctx context.Context, id int) (*models.Scene, error) {
q := qb.selectDataset().Where(qb.queryTableMgr.byID(id)) q := qb.selectDataset().Where(qb.tableMgr.byID(id))
ret, err := qb.get(ctx, q) ret, err := qb.get(ctx, q)
if err != nil { if err != nil {
@@ -552,7 +551,7 @@ func (qb *SceneStore) FindByPath(ctx context.Context, p string) ([]*models.Scene
} }
func (qb *SceneStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Scene, error) { func (qb *SceneStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Scene, error) {
table := qb.queryTable() table := qb.table()
q := qb.selectDataset().Where( q := qb.selectDataset().Where(
table.Col(idColumn).Eq( table.Col(idColumn).Eq(
@@ -706,16 +705,28 @@ func (qb *SceneStore) Wall(ctx context.Context, q *string) ([]*models.Scene, err
s = *q s = *q
} }
table := qb.queryTable() table := qb.table()
qq := qb.selectDataset().Prepared(true).Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80) qq := qb.selectDataset().Prepared(true).Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80)
return qb.getMany(ctx, qq) return qb.getMany(ctx, qq)
} }
func (qb *SceneStore) All(ctx context.Context) ([]*models.Scene, error) { func (qb *SceneStore) All(ctx context.Context) ([]*models.Scene, error) {
return qb.getMany(ctx, qb.selectDataset().Order( table := qb.table()
qb.queryTable().Col("parent_folder_path").Asc(), fileTable := fileTableMgr.table
qb.queryTable().Col("basename").Asc(), folderTable := folderTableMgr.table
qb.queryTable().Col("date").Asc(), return qb.getMany(ctx, qb.selectDataset().LeftJoin(
scenesFilesJoinTable,
goqu.On(scenesFilesJoinTable.Col(sceneIDColumn).Eq(table.Col(idColumn))),
).LeftJoin(
fileTable,
goqu.On(fileTable.Col(idColumn).Eq(scenesFilesJoinTable.Col(fileIDColumn))),
).LeftJoin(
folderTable,
goqu.On(folderTable.Col(idColumn).Eq(fileTable.Col("parent_folder_id"))),
).Order(
folderTable.Col("path").Asc(),
fileTable.Col("basename").Asc(),
table.Col("date").Asc(),
)) ))
} }
@@ -1388,6 +1399,16 @@ func (qb *SceneStore) moviesRepository() *repository {
} }
} }
func (qb *SceneStore) filesRepository() *filesRepository {
return &filesRepository{
repository: repository{
tx: qb.tx,
tableName: scenesFilesTable,
idColumn: sceneIDColumn,
},
}
}
func (qb *SceneStore) performersRepository() *joinRepository { func (qb *SceneStore) performersRepository() *joinRepository {
return &joinRepository{ return &joinRepository{
repository: repository{ repository: repository{

View File

@@ -83,6 +83,7 @@ func Test_sceneQueryBuilder_Create(t *testing.T) {
Endpoint: endpoint2, Endpoint: endpoint2,
}, },
}, },
Files: []*file.VideoFile{},
}, },
false, false,
}, },
@@ -300,6 +301,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{ Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithSpacedName), makeSceneFileWithID(sceneIdxWithSpacedName),
}, },
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
}, },
false, false,
}, },
@@ -310,6 +316,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{ Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithGallery), makeSceneFileWithID(sceneIdxWithGallery),
}, },
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
}, },
false, false,
}, },
@@ -320,6 +331,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{ Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithTag), makeSceneFileWithID(sceneIdxWithTag),
}, },
TagIDs: []int{},
GalleryIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
}, },
false, false,
}, },
@@ -330,6 +346,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{ Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithPerformer), makeSceneFileWithID(sceneIdxWithPerformer),
}, },
PerformerIDs: []int{},
TagIDs: []int{},
GalleryIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
}, },
false, false,
}, },
@@ -340,6 +361,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{ Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithMovie), makeSceneFileWithID(sceneIdxWithMovie),
}, },
Movies: []models.MoviesScenes{},
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
StashIDs: []models.StashID{},
}, },
false, false,
}, },
@@ -350,7 +376,12 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{ Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithGallery), makeSceneFileWithID(sceneIdxWithGallery),
}, },
StudioID: &invalidID, StudioID: &invalidID,
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
}, },
true, true,
}, },
@@ -575,6 +606,11 @@ func Test_sceneQueryBuilder_UpdatePartial(t *testing.T) {
Files: []*file.VideoFile{ Files: []*file.VideoFile{
makeSceneFile(sceneIdxWithSpacedName), makeSceneFile(sceneIdxWithSpacedName),
}, },
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
}, },
false, false,
}, },
@@ -738,7 +774,7 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) {
}, },
}, },
models.Scene{ models.Scene{
StashIDs: append(stashIDs, []models.StashID{sceneStashID(sceneIdxWithSpacedName)}...), StashIDs: append([]models.StashID{sceneStashID(sceneIdxWithSpacedName)}, stashIDs...),
}, },
false, false,
}, },
@@ -892,7 +928,9 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove, Mode: models.RelationshipUpdateModeRemove,
}, },
}, },
models.Scene{}, models.Scene{
GalleryIDs: []int{},
},
false, false,
}, },
{ {
@@ -936,7 +974,9 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove, Mode: models.RelationshipUpdateModeRemove,
}, },
}, },
models.Scene{}, models.Scene{
Movies: []models.MoviesScenes{},
},
false, false,
}, },
{ {
@@ -948,7 +988,9 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove, Mode: models.RelationshipUpdateModeRemove,
}, },
}, },
models.Scene{}, models.Scene{
StashIDs: []models.StashID{},
},
false, false,
}, },
{ {

View File

@@ -458,10 +458,6 @@ var (
) )
func indexesToIDs(ids []int, indexes []int) []int { func indexesToIDs(ids []int, indexes []int) []int {
if len(indexes) == 0 {
return nil
}
ret := make([]int, len(indexes)) ret := make([]int, len(indexes))
for i, idx := range indexes { for i, idx := range indexes {
ret[i] = ids[idx] ret[i] = ids[idx]
@@ -964,13 +960,10 @@ func makeScene(i int) *models.Scene {
mids := indexesToIDs(movieIDs, sceneMovies[i]) mids := indexesToIDs(movieIDs, sceneMovies[i])
var movies []models.MoviesScenes movies := make([]models.MoviesScenes, len(mids))
if len(mids) > 0 { for i, m := range mids {
movies = make([]models.MoviesScenes, len(mids)) movies[i] = models.MoviesScenes{
for i, m := range mids { MovieID: m,
movies[i] = models.MoviesScenes{
MovieID: m,
}
} }
} }

View File

@@ -361,6 +361,7 @@ type scenesMoviesTable struct {
} }
type moviesScenesRow struct { type moviesScenesRow struct {
SceneID null.Int `db:"scene_id"`
MovieID null.Int `db:"movie_id"` MovieID null.Int `db:"movie_id"`
SceneIndex null.Int `db:"scene_index"` SceneIndex null.Int `db:"scene_index"`
} }
@@ -552,12 +553,8 @@ func queryFunc(ctx context.Context, query *goqu.SelectDataset, single bool, f fu
return err return err
} }
tx, err := getDBReader(ctx) wrapper := dbWrapper{}
if err != nil { rows, err := wrapper.QueryxContext(ctx, q, args...)
return err
}
rows, err := tx.QueryxContext(ctx, q, args...)
if err != nil && !errors.Is(err, sql.ErrNoRows) { if err != nil && !errors.Is(err, sql.ErrNoRows) {
return fmt.Errorf("querying `%s` [%v]: %w", q, args, err) return fmt.Errorf("querying `%s` [%v]: %w", q, args, err)
@@ -586,12 +583,8 @@ func querySimple(ctx context.Context, query *goqu.SelectDataset, out interface{}
return err return err
} }
tx, err := getDBReader(ctx) wrapper := dbWrapper{}
if err != nil { rows, err := wrapper.QueryxContext(ctx, q, args...)
return err
}
rows, err := tx.QueryxContext(ctx, q, args...)
if err != nil { if err != nil {
return fmt.Errorf("querying `%s` [%v]: %w", q, args, err) return fmt.Errorf("querying `%s` [%v]: %w", q, args, err)
} }

View File

@@ -13,9 +13,6 @@ var (
imagesTagsJoinTable = goqu.T(imagesTagsTable) imagesTagsJoinTable = goqu.T(imagesTagsTable)
performersImagesJoinTable = goqu.T(performersImagesTable) performersImagesJoinTable = goqu.T(performersImagesTable)
imagesFilesJoinTable = goqu.T(imagesFilesTable) imagesFilesJoinTable = goqu.T(imagesFilesTable)
imagesQueryTable = goqu.T("images_query")
galleriesQueryTable = goqu.T("galleries_query")
scenesQueryTable = goqu.T("scenes_query")
galleriesFilesJoinTable = goqu.T(galleriesFilesTable) galleriesFilesJoinTable = goqu.T(galleriesFilesTable)
galleriesTagsJoinTable = goqu.T(galleriesTagsTable) galleriesTagsJoinTable = goqu.T(galleriesTagsTable)
@@ -35,11 +32,6 @@ var (
idColumn: goqu.T(imageTable).Col(idColumn), idColumn: goqu.T(imageTable).Col(idColumn),
} }
imageQueryTableMgr = &table{
table: imagesQueryTable,
idColumn: imagesQueryTable.Col(idColumn),
}
imagesFilesTableMgr = &relatedFilesTable{ imagesFilesTableMgr = &relatedFilesTable{
table: table{ table: table{
table: imagesFilesJoinTable, table: imagesFilesJoinTable,
@@ -78,11 +70,6 @@ var (
idColumn: goqu.T(galleryTable).Col(idColumn), idColumn: goqu.T(galleryTable).Col(idColumn),
} }
galleryQueryTableMgr = &table{
table: galleriesQueryTable,
idColumn: galleriesQueryTable.Col(idColumn),
}
galleriesFilesTableMgr = &relatedFilesTable{ galleriesFilesTableMgr = &relatedFilesTable{
table: table{ table: table{
table: galleriesFilesJoinTable, table: galleriesFilesJoinTable,
@@ -121,11 +108,6 @@ var (
idColumn: goqu.T(sceneTable).Col(idColumn), idColumn: goqu.T(sceneTable).Col(idColumn),
} }
sceneQueryTableMgr = &table{
table: scenesQueryTable,
idColumn: scenesQueryTable.Col(idColumn),
}
scenesFilesTableMgr = &relatedFilesTable{ scenesFilesTableMgr = &relatedFilesTable{
table: table{ table: table{
table: scenesFilesJoinTable, table: scenesFilesJoinTable,

View File

@@ -2,10 +2,12 @@ package sqlite
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"runtime/debug" "runtime/debug"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"github.com/stashapp/stash/pkg/logger" "github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/models" "github.com/stashapp/stash/pkg/models"
) )
@@ -100,6 +102,14 @@ func getDBReader(ctx context.Context) (dbReader, error) {
return tx, nil return tx, nil
} }
func (db *Database) IsLocked(err error) bool {
var sqliteError sqlite3.Error
if errors.As(err, &sqliteError) {
return sqliteError.Code == sqlite3.ErrBusy
}
return false
}
func (db *Database) TxnRepository() models.Repository { func (db *Database) TxnRepository() models.Repository {
return models.Repository{ return models.Repository{
TxnManager: db, TxnManager: db,

View File

@@ -3,6 +3,7 @@ package sqlite
import ( import (
"context" "context"
"database/sql" "database/sql"
"fmt"
"time" "time"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
@@ -31,67 +32,88 @@ func logSQL(start time.Time, query string, args ...interface{}) {
type dbWrapper struct{} type dbWrapper struct{}
func sqlError(err error, sql string, args ...interface{}) error {
if err == nil {
return nil
}
return fmt.Errorf("error executing `%s` [%v]: %w", sql, args, err)
}
func (*dbWrapper) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error { func (*dbWrapper) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
tx, err := getDBReader(ctx) tx, err := getDBReader(ctx)
if err != nil { if err != nil {
return err return sqlError(err, query, args...)
} }
start := time.Now() start := time.Now()
err = tx.Get(dest, query, args...) err = tx.Get(dest, query, args...)
logSQL(start, query, args...) logSQL(start, query, args...)
return err return sqlError(err, query, args...)
} }
func (*dbWrapper) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error { func (*dbWrapper) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
tx, err := getDBReader(ctx) tx, err := getDBReader(ctx)
if err != nil { if err != nil {
return err return sqlError(err, query, args...)
} }
start := time.Now() start := time.Now()
err = tx.Select(dest, query, args...) err = tx.Select(dest, query, args...)
logSQL(start, query, args...) logSQL(start, query, args...)
return err return sqlError(err, query, args...)
} }
func (*dbWrapper) Queryx(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { func (*dbWrapper) Queryx(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) {
tx, err := getDBReader(ctx) tx, err := getDBReader(ctx)
if err != nil { if err != nil {
return nil, err return nil, sqlError(err, query, args...)
} }
start := time.Now() start := time.Now()
ret, err := tx.Queryx(query, args...) ret, err := tx.Queryx(query, args...)
logSQL(start, query, args...) logSQL(start, query, args...)
return ret, err return ret, sqlError(err, query, args...)
}
func (*dbWrapper) QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) {
tx, err := getDBReader(ctx)
if err != nil {
return nil, sqlError(err, query, args...)
}
start := time.Now()
ret, err := tx.QueryxContext(ctx, query, args...)
logSQL(start, query, args...)
return ret, sqlError(err, query, args...)
} }
func (*dbWrapper) NamedExec(ctx context.Context, query string, arg interface{}) (sql.Result, error) { func (*dbWrapper) NamedExec(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
tx, err := getTx(ctx) tx, err := getTx(ctx)
if err != nil { if err != nil {
return nil, err return nil, sqlError(err, query, arg)
} }
start := time.Now() start := time.Now()
ret, err := tx.NamedExec(query, arg) ret, err := tx.NamedExec(query, arg)
logSQL(start, query, arg) logSQL(start, query, arg)
return ret, err return ret, sqlError(err, query, arg)
} }
func (*dbWrapper) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { func (*dbWrapper) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
tx, err := getTx(ctx) tx, err := getTx(ctx)
if err != nil { if err != nil {
return nil, err return nil, sqlError(err, query, args...)
} }
start := time.Now() start := time.Now()
ret, err := tx.Exec(query, args...) ret, err := tx.Exec(query, args...)
logSQL(start, query, args...) logSQL(start, query, args...)
return ret, err return ret, sqlError(err, query, args...)
} }

View File

@@ -1,12 +1,17 @@
package txn package txn
import "context" import (
"context"
"fmt"
)
type Manager interface { type Manager interface {
Begin(ctx context.Context) (context.Context, error) Begin(ctx context.Context) (context.Context, error)
Commit(ctx context.Context) error Commit(ctx context.Context) error
Rollback(ctx context.Context) error Rollback(ctx context.Context) error
IsLocked(err error) bool
AddPostCommitHook(ctx context.Context, hook TxnFunc) AddPostCommitHook(ctx context.Context, hook TxnFunc)
AddPostRollbackHook(ctx context.Context, hook TxnFunc) AddPostRollbackHook(ctx context.Context, hook TxnFunc)
} }
@@ -58,3 +63,33 @@ func WithDatabase(ctx context.Context, p DatabaseProvider, fn TxnFunc) error {
return fn(ctx) return fn(ctx)
} }
// Retryer wraps transaction execution with automatic retries when the
// transaction manager reports the database as locked.
type Retryer struct {
	// Manager begins/commits transactions and classifies locked errors
	// via IsLocked.
	Manager Manager
	// Retries is the maximum number of attempts WithTxn will make.
	Retries int
	// OnFail, if non-nil, is invoked after each locked-failure attempt
	// (1-based); returning a non-nil error aborts further retries and
	// is returned to the caller.
	OnFail func(ctx context.Context, err error, attempt int) error
}
// WithTxn runs fn inside a transaction, retrying up to r.Retries times
// while the manager reports the database as locked. Any other error —
// or a non-nil error from the OnFail hook — aborts immediately. The
// returned error wraps the last transaction error.
func (r Retryer) WithTxn(ctx context.Context, fn TxnFunc) error {
	// Always make at least one attempt: previously Retries < 1 skipped
	// fn entirely and returned fmt.Errorf wrapping a nil error.
	attempts := r.Retries
	if attempts < 1 {
		attempts = 1
	}

	var err error
	for attempt := 1; attempt <= attempts; attempt++ {
		err = WithTxn(ctx, r.Manager, fn)
		if err == nil {
			return nil
		}

		// Only locked-database errors are retryable.
		if !r.Manager.IsLocked(err) {
			return err
		}

		if r.OnFail != nil {
			// Distinct name so the loop's err is not shadowed.
			if failErr := r.OnFail(ctx, err, attempt); failErr != nil {
				return failErr
			}
		}
	}

	// Report the number of attempts actually made. The original used
	// the post-loop counter (attempts+1), overstating by one.
	return fmt.Errorf("failed after %d attempts: %w", attempts, err)
}