[Files Refactor] Performance tuning (#2813)

* Do database txn in same thread; retry on locked db
* Remove captions from SlimSceneData
* Fix tracing
* Use WHERE IN instead of individual SELECTs
* Remove scenes_query view
* Remove images_query view
* Remove galleries_query view
* Use WHERE IN for FindMany
* Don't interrupt scanning of zip files
* Fix image filesize sort
Author: WithoutPants
Date: 2022-08-11 16:14:57 +10:00
parent 87167736f6
commit 9b31b20fed
19 changed files with 715 additions and 680 deletions

View File

@@ -9,10 +9,6 @@ fragment SlimSceneData on Scene {
organized
interactive
interactive_speed
captions {
language_code
caption_type
}
files {
...VideoFileData

View File

@@ -15,7 +15,11 @@ import (
"github.com/stashapp/stash/pkg/txn"
)
const scanQueueSize = 200000
const (
scanQueueSize = 200000
// maximum number of times to retry in the event of a locked database
maxRetries = 1000
)
// Repository provides access to storage methods for files and folders.
type Repository struct {
@@ -86,7 +90,7 @@ type scanJob struct {
zipPathToID sync.Map
count int
txnMutex sync.Mutex
txnRetryer txn.Retryer
}
// ScanOptions provides options for scanning files.
@@ -113,6 +117,10 @@ func (s *Scanner) Scan(ctx context.Context, handlers []Handler, options ScanOpti
handlers: handlers,
ProgressReports: progressReporter,
options: options,
txnRetryer: txn.Retryer{
Manager: s.Repository,
Retries: maxRetries,
},
}
job.execute(ctx)
@@ -126,10 +134,7 @@ type scanFile struct {
}
func (s *scanJob) withTxn(ctx context.Context, fn func(ctx context.Context) error) error {
// get exclusive access to the database
s.txnMutex.Lock()
defer s.txnMutex.Unlock()
return txn.WithTxn(ctx, s.Repository, fn)
return s.txnRetryer.WithTxn(ctx, fn)
}
func (s *scanJob) withDB(ctx context.Context, fn func(ctx context.Context) error) error {
@@ -157,20 +162,6 @@ func (s *scanJob) execute(ctx context.Context) {
logger.Infof("Finished adding files to queue. %d files queued", s.count)
}()
done := make(chan struct{}, 1)
go func() {
if err := s.processDBOperations(ctx); err != nil {
if errors.Is(err, context.Canceled) {
return
}
logger.Errorf("error processing database operations for scan: %v", err)
}
close(done)
}()
if err := s.processQueue(ctx); err != nil {
if errors.Is(err, context.Canceled) {
return
@@ -179,9 +170,6 @@ func (s *scanJob) execute(ctx context.Context) {
logger.Errorf("error scanning files: %v", err)
return
}
// wait for database operations to complete
<-done
}
func (s *scanJob) queueFiles(ctx context.Context, paths []string) error {
@@ -247,7 +235,10 @@ func (s *scanJob) queueFileFunc(ctx context.Context, f FS, zipFile *scanFile) fs
if info.IsDir() {
// handle folders immediately
if err := s.handleFolder(ctx, ff); err != nil {
logger.Errorf("error processing %q: %v", path, err)
if !errors.Is(err, context.Canceled) {
logger.Errorf("error processing %q: %v", path, err)
}
// skip the directory since we won't be able to process the files anyway
return fs.SkipDir
}
@@ -259,7 +250,9 @@ func (s *scanJob) queueFileFunc(ctx context.Context, f FS, zipFile *scanFile) fs
if zipFile != nil {
s.ProgressReports.ExecuteTask("Scanning "+path, func() {
if err := s.handleFile(ctx, ff); err != nil {
logger.Errorf("error processing %q: %v", path, err)
if !errors.Is(err, context.Canceled) {
logger.Errorf("error processing %q: %v", path, err)
}
// don't return an error, just skip the file
}
})
@@ -355,18 +348,6 @@ func (s *scanJob) incrementProgress() {
}
}
func (s *scanJob) processDBOperations(ctx context.Context) error {
for fn := range s.dbQueue {
if err := ctx.Err(); err != nil {
return err
}
_ = s.withTxn(ctx, fn)
}
return nil
}
func (s *scanJob) processQueueItem(ctx context.Context, f scanFile) {
s.ProgressReports.ExecuteTask("Scanning "+f.Path, func() {
var err error
@@ -376,7 +357,7 @@ func (s *scanJob) processQueueItem(ctx context.Context, f scanFile) {
err = s.handleFile(ctx, f)
}
if err != nil {
if err != nil && !errors.Is(err, context.Canceled) {
logger.Errorf("error processing %q: %v", f.Path, err)
}
})
@@ -552,7 +533,13 @@ func (s *scanJob) handleFile(ctx context.Context, f scanFile) error {
if ff != nil && s.isZipFile(f.info.Name()) {
f.BaseFile = ff.Base()
if err := s.scanZipFile(ctx, f); err != nil {
// scan zip files with a different context that is not cancellable
// cancelling while scanning zip file contents results in the scan
// contents being partially completed
zipCtx := context.Background()
if err := s.scanZipFile(zipCtx, f); err != nil {
logger.Errorf("Error scanning zip file %q: %v", f.Path, err)
}
}
@@ -638,7 +625,7 @@ func (s *scanJob) onNewFile(ctx context.Context, f scanFile) (File, error) {
}
// if not renamed, queue file for creation
if err := s.queueDBOperation(ctx, path, func(ctx context.Context) error {
if err := s.withTxn(ctx, func(ctx context.Context) error {
if err := s.Repository.Create(ctx, file); err != nil {
return fmt.Errorf("creating file %q: %w", path, err)
}
@@ -655,17 +642,6 @@ func (s *scanJob) onNewFile(ctx context.Context, f scanFile) (File, error) {
return file, nil
}
func (s *scanJob) queueDBOperation(ctx context.Context, path string, fn func(ctx context.Context) error) error {
// perform immediately if it is a zip file
if s.isZipFile(path) {
return s.withTxn(ctx, fn)
}
s.dbQueue <- fn
return nil
}
func (s *scanJob) fireDecorators(ctx context.Context, fs FS, f File) (File, error) {
for _, h := range s.FileDecorators {
var err error
@@ -782,7 +758,7 @@ func (s *scanJob) handleRename(ctx context.Context, f File, fp []Fingerprint) (F
fBase.CreatedAt = otherBase.CreatedAt
fBase.Fingerprints = otherBase.Fingerprints
if err := s.queueDBOperation(ctx, fBase.Path, func(ctx context.Context) error {
if err := s.withTxn(ctx, func(ctx context.Context) error {
if err := s.Repository.Update(ctx, f); err != nil {
return fmt.Errorf("updating file for rename %q: %w", fBase.Path, err)
}
@@ -831,7 +807,7 @@ func (s *scanJob) onExistingFile(ctx context.Context, f scanFile, existing File)
return nil, nil
}
if err := s.queueDBOperation(ctx, path, func(ctx context.Context) error {
if err := s.withTxn(ctx, func(ctx context.Context) error {
if err := s.fireHandlers(ctx, existing); err != nil {
return err
}
@@ -866,7 +842,7 @@ func (s *scanJob) onExistingFile(ctx context.Context, f scanFile, existing File)
}
// queue file for update
if err := s.queueDBOperation(ctx, path, func(ctx context.Context) error {
if err := s.withTxn(ctx, func(ctx context.Context) error {
if err := s.Repository.Update(ctx, existing); err != nil {
return fmt.Errorf("updating file %q: %w", path, err)
}

View File

@@ -31,6 +31,10 @@ func (*TxnManager) AddPostCommitHook(ctx context.Context, hook txn.TxnFunc) {
func (*TxnManager) AddPostRollbackHook(ctx context.Context, hook txn.TxnFunc) {
}
func (*TxnManager) IsLocked(err error) bool {
return false
}
func (*TxnManager) Reset() error {
return nil
}

View File

@@ -75,13 +75,18 @@ type Database struct {
}
func NewDatabase() *Database {
return &Database{
File: NewFileStore(),
Folder: NewFolderStore(),
Image: NewImageStore(),
Gallery: NewGalleryStore(),
Scene: NewSceneStore(),
fileStore := NewFileStore()
folderStore := NewFolderStore()
ret := &Database{
File: fileStore,
Folder: folderStore,
Scene: NewSceneStore(fileStore),
Image: NewImageStore(fileStore),
Gallery: NewGalleryStore(fileStore, folderStore),
}
return ret
}
// Ready returns an error if the database is not ready to begin transactions.

View File

@@ -250,12 +250,6 @@ func (r *fileQueryRow) appendRelationships(i *file.BaseFile) {
}
}
func mergeFiles(dest file.File, src file.File) {
if src.Base().Fingerprints != nil {
dest.Base().Fingerprints = appendFingerprintsUnique(dest.Base().Fingerprints, src.Base().Fingerprints...)
}
}
type fileQueryRows []fileQueryRow
func (r fileQueryRows) resolve() []file.File {
@@ -279,11 +273,6 @@ func (r fileQueryRows) resolve() []file.File {
return ret
}
type relatedFileQueryRow struct {
fileQueryRow
Primary null.Bool `db:"primary"`
}
type FileStore struct {
repository

View File

@@ -59,6 +59,23 @@ func (r *galleryRow) fromGallery(o models.Gallery) {
r.UpdatedAt = o.UpdatedAt
}
func (r *galleryRow) resolve() *models.Gallery {
return &models.Gallery{
ID: r.ID,
Title: r.Title.String,
URL: r.URL.String,
Date: r.Date.DatePtr(),
Details: r.Details.String,
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
StudioID: nullIntPtr(r.StudioID),
FolderID: nullIntFolderIDPtr(r.FolderID),
// FolderPath: r.FolderPath.String,
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
}
type galleryRowRecord struct {
updateRecord
}
@@ -75,113 +92,24 @@ func (r *galleryRowRecord) fromPartial(o models.GalleryPartial) {
r.setTime("updated_at", o.UpdatedAt)
}
type galleryQueryRow struct {
galleryRow
relatedFileQueryRow
FolderPath null.String `db:"folder_path"`
SceneID null.Int `db:"scene_id"`
TagID null.Int `db:"tag_id"`
PerformerID null.Int `db:"performer_id"`
}
func (r *galleryQueryRow) resolve() *models.Gallery {
ret := &models.Gallery{
ID: r.ID,
Title: r.Title.String,
URL: r.URL.String,
Date: r.Date.DatePtr(),
Details: r.Details.String,
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
StudioID: nullIntPtr(r.StudioID),
FolderID: nullIntFolderIDPtr(r.FolderID),
FolderPath: r.FolderPath.String,
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
r.appendRelationships(ret)
return ret
}
func appendFileUnique(vs []file.File, toAdd file.File, isPrimary bool) []file.File {
// check in reverse, since it's most likely to be the last one
for i := len(vs) - 1; i >= 0; i-- {
if vs[i].Base().ID == toAdd.Base().ID {
// merge the two
mergeFiles(vs[i], toAdd)
return vs
}
}
if !isPrimary {
return append(vs, toAdd)
}
// primary should be first
return append([]file.File{toAdd}, vs...)
}
func (r *galleryQueryRow) appendRelationships(i *models.Gallery) {
if r.TagID.Valid {
i.TagIDs = intslice.IntAppendUnique(i.TagIDs, int(r.TagID.Int64))
}
if r.PerformerID.Valid {
i.PerformerIDs = intslice.IntAppendUnique(i.PerformerIDs, int(r.PerformerID.Int64))
}
if r.SceneID.Valid {
i.SceneIDs = intslice.IntAppendUnique(i.SceneIDs, int(r.SceneID.Int64))
}
if r.relatedFileQueryRow.FileID.Valid {
f := r.fileQueryRow.resolve()
i.Files = appendFileUnique(i.Files, f, r.Primary.Bool)
}
}
type galleryQueryRows []galleryQueryRow
func (r galleryQueryRows) resolve() []*models.Gallery {
var ret []*models.Gallery
var last *models.Gallery
var lastID int
for _, row := range r {
if last == nil || lastID != row.ID {
f := row.resolve()
last = f
lastID = row.ID
ret = append(ret, last)
continue
}
// must be merging with previous row
row.appendRelationships(last)
}
return ret
}
type GalleryStore struct {
repository
tableMgr *table
queryTableMgr *table
tableMgr *table
fileStore *FileStore
folderStore *FolderStore
}
func NewGalleryStore() *GalleryStore {
func NewGalleryStore(fileStore *FileStore, folderStore *FolderStore) *GalleryStore {
return &GalleryStore{
repository: repository{
tableName: galleryTable,
idColumn: idColumn,
},
tableMgr: galleryTableMgr,
queryTableMgr: galleryQueryTableMgr,
tableMgr: galleryTableMgr,
fileStore: fileStore,
folderStore: folderStore,
}
}
@@ -189,10 +117,6 @@ func (qb *GalleryStore) table() exp.IdentifierExpression {
return qb.tableMgr.table
}
func (qb *GalleryStore) queryTable() exp.IdentifierExpression {
return qb.queryTableMgr.table
}
func (qb *GalleryStore) Create(ctx context.Context, newObject *models.Gallery, fileIDs []file.ID) error {
var r galleryRow
r.fromGallery(*newObject)
@@ -298,7 +222,7 @@ func (qb *GalleryStore) Destroy(ctx context.Context, id int) error {
}
func (qb *GalleryStore) selectDataset() *goqu.SelectDataset {
return dialect.From(galleriesQueryTable).Select(galleriesQueryTable.All())
return dialect.From(qb.table()).Select(qb.table().All())
}
func (qb *GalleryStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Gallery, error) {
@@ -316,24 +240,88 @@ func (qb *GalleryStore) get(ctx context.Context, q *goqu.SelectDataset) (*models
func (qb *GalleryStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Gallery, error) {
const single = false
var rows galleryQueryRows
var ret []*models.Gallery
if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error {
var f galleryQueryRow
var f galleryRow
if err := r.StructScan(&f); err != nil {
return err
}
rows = append(rows, f)
s := f.resolve()
if err := qb.resolveRelationships(ctx, s); err != nil {
return err
}
ret = append(ret, s)
return nil
}); err != nil {
return nil, err
}
return rows.resolve(), nil
return ret, nil
}
func (qb *GalleryStore) resolveRelationships(ctx context.Context, s *models.Gallery) error {
var err error
// files
s.Files, err = qb.getFiles(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving gallery files: %w", err)
}
// folder
if s.FolderID != nil {
folder, err := qb.folderStore.Find(ctx, *s.FolderID)
if err != nil {
return fmt.Errorf("resolving gallery folder: %w", err)
}
s.FolderPath = folder.Path
}
// performers
s.PerformerIDs, err = qb.performersRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving gallery performers: %w", err)
}
// tags
s.TagIDs, err = qb.tagsRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving gallery tags: %w", err)
}
// scenes
s.SceneIDs, err = qb.scenesRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving gallery scenes: %w", err)
}
return nil
}
func (qb *GalleryStore) getFiles(ctx context.Context, id int) ([]file.File, error) {
fileIDs, err := qb.filesRepository().get(ctx, id)
if err != nil {
return nil, err
}
// use fileStore to load files
files, err := qb.fileStore.Find(ctx, fileIDs...)
if err != nil {
return nil, err
}
ret := make([]file.File, len(files))
copy(ret, files)
return ret, nil
}
func (qb *GalleryStore) Find(ctx context.Context, id int) (*models.Gallery, error) {
q := qb.selectDataset().Where(qb.queryTableMgr.byID(id))
q := qb.selectDataset().Where(qb.tableMgr.byID(id))
ret, err := qb.get(ctx, q)
if err != nil {
@@ -344,25 +332,30 @@ func (qb *GalleryStore) Find(ctx context.Context, id int) (*models.Gallery, erro
}
func (qb *GalleryStore) FindMany(ctx context.Context, ids []int) ([]*models.Gallery, error) {
var galleries []*models.Gallery
for _, id := range ids {
gallery, err := qb.Find(ctx, id)
if err != nil {
return nil, err
}
q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(ids))
unsorted, err := qb.getMany(ctx, q)
if err != nil {
return nil, err
}
if gallery == nil {
return nil, fmt.Errorf("gallery with id %d not found", id)
}
galleries := make([]*models.Gallery, len(ids))
galleries = append(galleries, gallery)
for _, s := range unsorted {
i := intslice.IntIndex(ids, s.ID)
galleries[i] = s
}
for i := range galleries {
if galleries[i] == nil {
return nil, fmt.Errorf("gallery with id %d not found", ids[i])
}
}
return galleries, nil
}
func (qb *GalleryStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Gallery, error) {
table := qb.queryTable()
table := qb.table()
q := qb.selectDataset().Prepared(true).Where(
table.Col(idColumn).Eq(
@@ -986,6 +979,16 @@ func (qb *GalleryStore) setGallerySort(query *queryBuilder, findFilter *models.F
}
}
func (qb *GalleryStore) filesRepository() *filesRepository {
return &filesRepository{
repository: repository{
tx: qb.tx,
tableName: galleriesFilesTable,
idColumn: galleryIDColumn,
},
}
}
func (qb *GalleryStore) performersRepository() *joinRepository {
return &joinRepository{
repository: repository{
@@ -1027,3 +1030,14 @@ func (qb *GalleryStore) UpdateImages(ctx context.Context, galleryID int, imageID
// Delete the existing joins and then create new ones
return qb.imagesRepository().replace(ctx, galleryID, imageIDs)
}
func (qb *GalleryStore) scenesRepository() *joinRepository {
return &joinRepository{
repository: repository{
tx: qb.tx,
tableName: galleriesScenesTable,
idColumn: galleryIDColumn,
},
fkColumn: sceneIDColumn,
}
}
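For reference, the WHERE IN + reorder idiom used by the FindMany rewrites in this file (and in the image and scene stores below), as a standalone sketch. The diff uses intslice.IntIndex to place each row; a map is used here only to keep the sketch self-contained, and the type and helper names are illustrative, not from the commit.

// Illustrative sketch of the WHERE IN + reorder pattern: fetch all rows
// in one query, put them back into the caller's id order, and report any
// ids that were not found.
package main

import "fmt"

type row struct{ ID int }

func sortByIDs(ids []int, unsorted []*row) ([]*row, error) {
	index := make(map[int]int, len(ids))
	for i, id := range ids {
		index[id] = i
	}

	out := make([]*row, len(ids))
	for _, r := range unsorted {
		out[index[r.ID]] = r
	}

	for i, r := range out {
		if r == nil {
			return nil, fmt.Errorf("row with id %d not found", ids[i])
		}
	}
	return out, nil
}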

View File

@@ -51,6 +51,7 @@ func Test_galleryQueryBuilder_Create(t *testing.T) {
SceneIDs: []int{sceneIDs[sceneIdx1WithPerformer], sceneIDs[sceneIdx1WithStudio]},
TagIDs: []int{tagIDs[tagIdx1WithScene], tagIDs[tagIdx1WithDupName]},
PerformerIDs: []int{performerIDs[performerIdx1WithScene], performerIDs[performerIdx1WithDupName]},
Files: []file.File{},
},
false,
},
@@ -202,9 +203,12 @@ func Test_galleryQueryBuilder_Update(t *testing.T) {
Files: []file.File{
makeGalleryFileWithID(galleryIdxWithImage),
},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
SceneIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
},
false,
},
@@ -215,9 +219,12 @@ func Test_galleryQueryBuilder_Update(t *testing.T) {
Files: []file.File{
makeGalleryFileWithID(galleryIdxWithScene),
},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
SceneIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
},
false,
},
@@ -228,9 +235,12 @@ func Test_galleryQueryBuilder_Update(t *testing.T) {
Files: []file.File{
makeGalleryFileWithID(galleryIdxWithTag),
},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
SceneIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
},
false,
},
@@ -241,9 +251,12 @@ func Test_galleryQueryBuilder_Update(t *testing.T) {
Files: []file.File{
makeGalleryFileWithID(galleryIdxWithPerformer),
},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
SceneIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
},
false,
},
@@ -428,6 +441,9 @@ func Test_galleryQueryBuilder_UpdatePartial(t *testing.T) {
Files: []file.File{
makeGalleryFile(galleryIdxWithImage),
},
SceneIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
},
false,
},
@@ -621,7 +637,9 @@ func Test_galleryQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove,
},
},
models.Gallery{},
models.Gallery{
SceneIDs: []int{},
},
false,
},
{

View File

@@ -48,6 +48,19 @@ func (r *imageRow) fromImage(i models.Image) {
r.UpdatedAt = i.UpdatedAt
}
func (r *imageRow) resolve() *models.Image {
return &models.Image{
ID: r.ID,
Title: r.Title.String,
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
OCounter: r.OCounter,
StudioID: nullIntPtr(r.StudioID),
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
}
type imageRowRecord struct {
updateRecord
}
@@ -62,109 +75,24 @@ func (r *imageRowRecord) fromPartial(i models.ImagePartial) {
r.setTime("updated_at", i.UpdatedAt)
}
type imageQueryRow struct {
imageRow
relatedFileQueryRow
GalleryID null.Int `db:"gallery_id"`
TagID null.Int `db:"tag_id"`
PerformerID null.Int `db:"performer_id"`
}
func (r *imageQueryRow) resolve() *models.Image {
ret := &models.Image{
ID: r.ID,
Title: r.Title.String,
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
OCounter: r.OCounter,
StudioID: nullIntPtr(r.StudioID),
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
r.appendRelationships(ret)
return ret
}
func appendImageFileUnique(vs []*file.ImageFile, toAdd *file.ImageFile, isPrimary bool) []*file.ImageFile {
// check in reverse, since it's most likely to be the last one
for i := len(vs) - 1; i >= 0; i-- {
if vs[i].Base().ID == toAdd.Base().ID {
// merge the two
mergeFiles(vs[i], toAdd)
return vs
}
}
if !isPrimary {
return append(vs, toAdd)
}
// primary should be first
return append([]*file.ImageFile{toAdd}, vs...)
}
func (r *imageQueryRow) appendRelationships(i *models.Image) {
if r.GalleryID.Valid {
i.GalleryIDs = intslice.IntAppendUnique(i.GalleryIDs, int(r.GalleryID.Int64))
}
if r.TagID.Valid {
i.TagIDs = intslice.IntAppendUnique(i.TagIDs, int(r.TagID.Int64))
}
if r.PerformerID.Valid {
i.PerformerIDs = intslice.IntAppendUnique(i.PerformerIDs, int(r.PerformerID.Int64))
}
if r.relatedFileQueryRow.FileID.Valid {
f := r.fileQueryRow.resolve().(*file.ImageFile)
i.Files = appendImageFileUnique(i.Files, f, r.Primary.Bool)
}
}
type imageQueryRows []imageQueryRow
func (r imageQueryRows) resolve() []*models.Image {
var ret []*models.Image
var last *models.Image
var lastID int
for _, row := range r {
if last == nil || lastID != row.ID {
f := row.resolve()
last = f
lastID = row.ID
ret = append(ret, last)
continue
}
// must be merging with previous row
row.appendRelationships(last)
}
return ret
}
type ImageStore struct {
repository
tableMgr *table
queryTableMgr *table
tableMgr *table
oCounterManager
fileStore *FileStore
}
func NewImageStore() *ImageStore {
func NewImageStore(fileStore *FileStore) *ImageStore {
return &ImageStore{
repository: repository{
tableName: imageTable,
idColumn: idColumn,
},
tableMgr: imageTableMgr,
queryTableMgr: imageQueryTableMgr,
oCounterManager: oCounterManager{imageTableMgr},
fileStore: fileStore,
}
}
@@ -172,10 +100,6 @@ func (qb *ImageStore) table() exp.IdentifierExpression {
return qb.tableMgr.table
}
func (qb *ImageStore) queryTable() exp.IdentifierExpression {
return qb.queryTableMgr.table
}
func (qb *ImageStore) Create(ctx context.Context, newObject *models.ImageCreateInput) error {
var r imageRow
r.fromImage(*newObject.Image)
@@ -291,21 +215,30 @@ func (qb *ImageStore) Find(ctx context.Context, id int) (*models.Image, error) {
}
func (qb *ImageStore) FindMany(ctx context.Context, ids []int) ([]*models.Image, error) {
var images []*models.Image
for _, id := range ids {
image, err := qb.Find(ctx, id)
if err != nil {
return nil, err
}
q := qb.selectDataset().Prepared(true).Where(qb.table().Col(idColumn).In(ids))
unsorted, err := qb.getMany(ctx, q)
if err != nil {
return nil, err
}
images = append(images, image)
images := make([]*models.Image, len(ids))
for _, s := range unsorted {
i := intslice.IntIndex(ids, s.ID)
images[i] = s
}
for i := range images {
if images[i] == nil {
return nil, fmt.Errorf("image with id %d not found", ids[i])
}
}
return images, nil
}
func (qb *ImageStore) selectDataset() *goqu.SelectDataset {
return dialect.From(imagesQueryTable).Select(imagesQueryTable.All())
return dialect.From(qb.table()).Select(qb.table().All())
}
func (qb *ImageStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Image, error) {
@@ -323,24 +256,84 @@ func (qb *ImageStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.I
func (qb *ImageStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Image, error) {
const single = false
var rows imageQueryRows
var ret []*models.Image
if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error {
var f imageQueryRow
var f imageRow
if err := r.StructScan(&f); err != nil {
return err
}
rows = append(rows, f)
i := f.resolve()
if err := qb.resolveRelationships(ctx, i); err != nil {
return err
}
ret = append(ret, i)
return nil
}); err != nil {
return nil, err
}
return rows.resolve(), nil
return ret, nil
}
func (qb *ImageStore) resolveRelationships(ctx context.Context, i *models.Image) error {
var err error
// files
i.Files, err = qb.getFiles(ctx, i.ID)
if err != nil {
return fmt.Errorf("resolving image files: %w", err)
}
// performers
i.PerformerIDs, err = qb.performersRepository().getIDs(ctx, i.ID)
if err != nil {
return fmt.Errorf("resolving image performers: %w", err)
}
// tags
i.TagIDs, err = qb.tagsRepository().getIDs(ctx, i.ID)
if err != nil {
return fmt.Errorf("resolving image tags: %w", err)
}
// galleries
i.GalleryIDs, err = qb.galleriesRepository().getIDs(ctx, i.ID)
if err != nil {
return fmt.Errorf("resolving image galleries: %w", err)
}
return nil
}
func (qb *ImageStore) getFiles(ctx context.Context, id int) ([]*file.ImageFile, error) {
fileIDs, err := qb.filesRepository().get(ctx, id)
if err != nil {
return nil, err
}
// use fileStore to load files
files, err := qb.fileStore.Find(ctx, fileIDs...)
if err != nil {
return nil, err
}
ret := make([]*file.ImageFile, len(files))
for i, f := range files {
var ok bool
ret[i], ok = f.(*file.ImageFile)
if !ok {
return nil, fmt.Errorf("expected file to be *file.ImageFile not %T", f)
}
}
return ret, nil
}
func (qb *ImageStore) find(ctx context.Context, id int) (*models.Image, error) {
q := qb.selectDataset().Where(qb.queryTableMgr.byID(id))
q := qb.selectDataset().Where(qb.tableMgr.byID(id))
ret, err := qb.get(ctx, q)
if err != nil {
@@ -351,7 +344,7 @@ func (qb *ImageStore) find(ctx context.Context, id int) (*models.Image, error) {
}
func (qb *ImageStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Image, error) {
table := qb.queryTable()
table := qb.table()
q := qb.selectDataset().Prepared(true).Where(
table.Col(idColumn).Eq(
@@ -430,7 +423,8 @@ func (qb *ImageStore) FindByChecksum(ctx context.Context, checksum string) ([]*m
func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*models.Image, error) {
table := qb.table()
queryTable := qb.queryTable()
fileTable := fileTableMgr.table
folderTable := folderTableMgr.table
sq := dialect.From(table).
InnerJoin(
@@ -441,11 +435,20 @@ func (qb *ImageStore) FindByGalleryID(ctx context.Context, galleryID int) ([]*mo
galleriesImagesJoinTable.Col("gallery_id").Eq(galleryID),
)
q := qb.selectDataset().Prepared(true).Where(
queryTable.Col(idColumn).Eq(
q := qb.selectDataset().Prepared(true).LeftJoin(
imagesFilesJoinTable,
goqu.On(imagesFilesJoinTable.Col(imageIDColumn).Eq(table.Col(idColumn))),
).LeftJoin(
fileTable,
goqu.On(fileTable.Col(idColumn).Eq(imagesFilesJoinTable.Col(fileIDColumn))),
).LeftJoin(
folderTable,
goqu.On(folderTable.Col(idColumn).Eq(fileTable.Col("parent_folder_id"))),
).Where(
table.Col(idColumn).Eq(
sq,
),
).Order(queryTable.Col("parent_folder_path").Asc(), queryTable.Col("basename").Asc())
).Order(folderTable.Col("path").Asc(), fileTable.Col("basename").Asc())
ret, err := qb.getMany(ctx, q)
if err != nil {
@@ -969,7 +972,7 @@ func (qb *ImageStore) setImageSortAndPagination(q *queryBuilder, findFilter *mod
sortClause = getCountSort(imageTable, imagesTagsTable, imageIDColumn, direction)
case "performer_count":
sortClause = getCountSort(imageTable, performersImagesTable, imageIDColumn, direction)
case "mod_time", "size":
case "mod_time", "filesize":
addFilesJoin()
sortClause = getSort(sort, direction, "files")
default:
@@ -991,6 +994,16 @@ func (qb *ImageStore) galleriesRepository() *joinRepository {
}
}
func (qb *ImageStore) filesRepository() *filesRepository {
return &filesRepository{
repository: repository{
tx: qb.tx,
tableName: imagesFilesTable,
idColumn: imageIDColumn,
},
}
}
// func (qb *imageQueryBuilder) GetGalleryIDs(ctx context.Context, imageID int) ([]int, error) {
// return qb.galleriesRepository().getIDs(ctx, imageID)
// }

View File

@@ -44,6 +44,7 @@ func Test_imageQueryBuilder_Create(t *testing.T) {
GalleryIDs: []int{galleryIDs[galleryIdxWithImage]},
TagIDs: []int{tagIDs[tagIdx1WithImage], tagIDs[tagIdx1WithDupName]},
PerformerIDs: []int{performerIDs[performerIdx1WithImage], performerIDs[performerIdx1WithDupName]},
Files: []*file.ImageFile{},
},
false,
},
@@ -193,9 +194,12 @@ func Test_imageQueryBuilder_Update(t *testing.T) {
Files: []*file.ImageFile{
makeImageFileWithID(imageIdxWithGallery),
},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
},
false,
},
@@ -206,9 +210,12 @@ func Test_imageQueryBuilder_Update(t *testing.T) {
Files: []*file.ImageFile{
makeImageFileWithID(imageIdxWithGallery),
},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
},
false,
},
@@ -219,9 +226,12 @@ func Test_imageQueryBuilder_Update(t *testing.T) {
Files: []*file.ImageFile{
makeImageFileWithID(imageIdxWithTag),
},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
},
false,
},
@@ -232,9 +242,12 @@ func Test_imageQueryBuilder_Update(t *testing.T) {
Files: []*file.ImageFile{
makeImageFileWithID(imageIdxWithPerformer),
},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Organized: true,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
},
false,
},
@@ -403,6 +416,9 @@ func Test_imageQueryBuilder_UpdatePartial(t *testing.T) {
Files: []*file.ImageFile{
makeImageFile(imageIdx1WithGallery),
},
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
},
false,
},
@@ -596,7 +612,9 @@ func Test_imageQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove,
},
},
models.Image{},
models.Image{
GalleryIDs: []int{},
},
false,
},
{
@@ -2398,7 +2416,7 @@ func TestImageQuerySorting(t *testing.T) {
},
{
"file size",
"size",
"filesize",
models.SortDirectionEnumDesc,
-1,
-1,

View File

@@ -539,139 +539,3 @@ ALTER TABLE `scenes_new` rename to `scenes`;
CREATE INDEX `index_scenes_on_studio_id` on `scenes` (`studio_id`);
PRAGMA foreign_keys=ON;
-- create views to simplify queries
CREATE VIEW `images_query` AS
SELECT
`images`.`id`,
`images`.`title`,
`images`.`rating`,
`images`.`organized`,
`images`.`o_counter`,
`images`.`studio_id`,
`images`.`created_at`,
`images`.`updated_at`,
`galleries_images`.`gallery_id`,
`images_tags`.`tag_id`,
`performers_images`.`performer_id`,
`image_files`.`format` as `image_format`,
`image_files`.`width` as `image_width`,
`image_files`.`height` as `image_height`,
`files`.`id` as `file_id`,
`files`.`basename`,
`files`.`size`,
`files`.`mod_time`,
`files`.`zip_file_id`,
`folders`.`id` as `parent_folder_id`,
`folders`.`path` as `parent_folder_path`,
`zip_files`.`basename` as `zip_basename`,
`zip_files_folders`.`path` as `zip_folder_path`,
`files_fingerprints`.`type` as `fingerprint_type`,
`files_fingerprints`.`fingerprint`
FROM `images`
LEFT JOIN `performers_images` ON (`images`.`id` = `performers_images`.`image_id`)
LEFT JOIN `galleries_images` ON (`images`.`id` = `galleries_images`.`image_id`)
LEFT JOIN `images_tags` ON (`images`.`id` = `images_tags`.`image_id`)
LEFT JOIN `images_files` ON (`images`.`id` = `images_files`.`image_id`)
LEFT JOIN `image_files` ON (`images_files`.`file_id` = `image_files`.`file_id`)
LEFT JOIN `files` ON (`images_files`.`file_id` = `files`.`id`)
LEFT JOIN `folders` ON (`files`.`parent_folder_id` = `folders`.`id`)
LEFT JOIN `files` AS `zip_files` ON (`files`.`zip_file_id` = `zip_files`.`id`)
LEFT JOIN `folders` AS `zip_files_folders` ON (`zip_files`.`parent_folder_id` = `zip_files_folders`.`id`)
LEFT JOIN `files_fingerprints` ON (`images_files`.`file_id` = `files_fingerprints`.`file_id`);
CREATE VIEW `galleries_query` AS
SELECT
`galleries`.`id`,
`galleries`.`title`,
`galleries`.`url`,
`galleries`.`date`,
`galleries`.`details`,
`galleries`.`rating`,
`galleries`.`organized`,
`galleries`.`studio_id`,
`galleries`.`created_at`,
`galleries`.`updated_at`,
`galleries_tags`.`tag_id`,
`scenes_galleries`.`scene_id`,
`performers_galleries`.`performer_id`,
`galleries_folders`.`id` as `folder_id`,
`galleries_folders`.`path` as `folder_path`,
`files`.`id` as `file_id`,
`files`.`basename`,
`files`.`size`,
`files`.`mod_time`,
`files`.`zip_file_id`,
`parent_folders`.`id` as `parent_folder_id`,
`parent_folders`.`path` as `parent_folder_path`,
`zip_files`.`basename` as `zip_basename`,
`zip_files_folders`.`path` as `zip_folder_path`,
`files_fingerprints`.`type` as `fingerprint_type`,
`files_fingerprints`.`fingerprint`
FROM `galleries`
LEFT JOIN `performers_galleries` ON (`galleries`.`id` = `performers_galleries`.`gallery_id`)
LEFT JOIN `galleries_tags` ON (`galleries`.`id` = `galleries_tags`.`gallery_id`)
LEFT JOIN `scenes_galleries` ON (`galleries`.`id` = `scenes_galleries`.`gallery_id`)
LEFT JOIN `folders` AS `galleries_folders` ON (`galleries`.`folder_id` = `galleries_folders`.`id`)
LEFT JOIN `galleries_files` ON (`galleries`.`id` = `galleries_files`.`gallery_id`)
LEFT JOIN `files` ON (`galleries_files`.`file_id` = `files`.`id`)
LEFT JOIN `folders` AS `parent_folders` ON (`files`.`parent_folder_id` = `parent_folders`.`id`)
LEFT JOIN `files` AS `zip_files` ON (`files`.`zip_file_id` = `zip_files`.`id`)
LEFT JOIN `folders` AS `zip_files_folders` ON (`zip_files`.`parent_folder_id` = `zip_files_folders`.`id`)
LEFT JOIN `files_fingerprints` ON (`galleries_files`.`file_id` = `files_fingerprints`.`file_id`);
CREATE VIEW `scenes_query` AS
SELECT
`scenes`.`id`,
`scenes`.`title`,
`scenes`.`details`,
`scenes`.`url`,
`scenes`.`date`,
`scenes`.`rating`,
`scenes`.`studio_id`,
`scenes`.`o_counter`,
`scenes`.`organized`,
`scenes`.`created_at`,
`scenes`.`updated_at`,
`scenes_tags`.`tag_id`,
`scenes_galleries`.`gallery_id`,
`performers_scenes`.`performer_id`,
`movies_scenes`.`movie_id`,
`movies_scenes`.`scene_index`,
`scene_stash_ids`.`stash_id`,
`scene_stash_ids`.`endpoint`,
`video_files`.`format` as `video_format`,
`video_files`.`width` as `video_width`,
`video_files`.`height` as `video_height`,
`video_files`.`duration`,
`video_files`.`video_codec`,
`video_files`.`audio_codec`,
`video_files`.`frame_rate`,
`video_files`.`bit_rate`,
`video_files`.`interactive`,
`video_files`.`interactive_speed`,
`files`.`id` as `file_id`,
`files`.`basename`,
`files`.`size`,
`files`.`mod_time`,
`files`.`zip_file_id`,
`folders`.`id` as `parent_folder_id`,
`folders`.`path` as `parent_folder_path`,
`zip_files`.`basename` as `zip_basename`,
`zip_files_folders`.`path` as `zip_folder_path`,
`files_fingerprints`.`type` as `fingerprint_type`,
`files_fingerprints`.`fingerprint`
FROM `scenes`
LEFT JOIN `performers_scenes` ON (`scenes`.`id` = `performers_scenes`.`scene_id`)
LEFT JOIN `scenes_tags` ON (`scenes`.`id` = `scenes_tags`.`scene_id`)
LEFT JOIN `movies_scenes` ON (`scenes`.`id` = `movies_scenes`.`scene_id`)
LEFT JOIN `scene_stash_ids` ON (`scenes`.`id` = `scene_stash_ids`.`scene_id`)
LEFT JOIN `scenes_galleries` ON (`scenes`.`id` = `scenes_galleries`.`scene_id`)
LEFT JOIN `scenes_files` ON (`scenes`.`id` = `scenes_files`.`scene_id`)
LEFT JOIN `video_files` ON (`scenes_files`.`file_id` = `video_files`.`file_id`)
LEFT JOIN `files` ON (`scenes_files`.`file_id` = `files`.`id`)
LEFT JOIN `folders` ON (`files`.`parent_folder_id` = `folders`.`id`)
LEFT JOIN `files` AS `zip_files` ON (`files`.`zip_file_id` = `zip_files`.`id`)
LEFT JOIN `folders` AS `zip_files_folders` ON (`zip_files`.`parent_folder_id` = `zip_files_folders`.`id`)
LEFT JOIN `files_fingerprints` ON (`scenes_files`.`file_id` = `files_fingerprints`.`file_id`);

View File

@@ -32,6 +32,11 @@ func (r *repository) getByID(ctx context.Context, id int, dest interface{}) erro
return r.tx.Get(ctx, dest, stmt, id)
}
func (r *repository) getAll(ctx context.Context, id int, f func(rows *sqlx.Rows) error) error {
stmt := fmt.Sprintf("SELECT * FROM %s WHERE %s = ?", r.tableName, r.idColumn)
return r.queryFunc(ctx, stmt, []interface{}{id}, false, f)
}
func (r *repository) insert(ctx context.Context, obj interface{}) (sql.Result, error) {
stmt := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", r.tableName, listKeys(obj, false), listKeys(obj, true))
return r.tx.NamedExec(ctx, stmt, obj)
@@ -468,6 +473,41 @@ func (r *stashIDRepository) replace(ctx context.Context, id int, newIDs []*model
return nil
}
type filesRepository struct {
repository
}
func (r *filesRepository) get(ctx context.Context, id int) ([]file.ID, error) {
query := fmt.Sprintf("SELECT file_id, `primary` from %s WHERE %s = ?", r.tableName, r.idColumn)
type relatedFile struct {
FileID file.ID `db:"file_id"`
Primary bool `db:"primary"`
}
var ret []file.ID
if err := r.queryFunc(ctx, query, []interface{}{id}, false, func(rows *sqlx.Rows) error {
var f relatedFile
if err := rows.StructScan(&f); err != nil {
return err
}
if f.Primary {
// prepend to list
ret = append([]file.ID{f.FileID}, ret...)
} else {
ret = append(ret, f.FileID)
}
return nil
}); err != nil {
return nil, err
}
return ret, nil
}
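A minimal sketch of what the primary-first ordering above gives callers: since filesRepository.get prepends the primary file id, stores that resolve files through it can treat the first element as the primary file. The helper name is an assumption for illustration, not something this commit adds.

// Illustrative only: callers may treat index 0 of the resolved slice as
// the primary file because the primary file id is prepended above.
package main

import "github.com/stashapp/stash/pkg/file"

func primaryFile(files []file.File) file.File {
	if len(files) == 0 {
		return nil
	}
	return files[0]
}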
func listKeys(i interface{}, addPrefix bool) string {
var query []string
v := reflect.ValueOf(i)

View File

@@ -82,6 +82,22 @@ func (r *sceneRow) fromScene(o models.Scene) {
r.UpdatedAt = o.UpdatedAt
}
func (r *sceneRow) resolve() *models.Scene {
return &models.Scene{
ID: r.ID,
Title: r.Title.String,
Details: r.Details.String,
URL: r.URL.String,
Date: r.Date.DatePtr(),
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
OCounter: r.OCounter,
StudioID: nullIntPtr(r.StudioID),
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
}
type sceneRowRecord struct {
updateRecord
}
@@ -99,139 +115,16 @@ func (r *sceneRowRecord) fromPartial(o models.ScenePartial) {
r.setTime("updated_at", o.UpdatedAt)
}
type sceneQueryRow struct {
sceneRow
relatedFileQueryRow
GalleryID null.Int `db:"gallery_id"`
TagID null.Int `db:"tag_id"`
PerformerID null.Int `db:"performer_id"`
moviesScenesRow
stashIDRow
}
func (r *sceneQueryRow) resolve() *models.Scene {
ret := &models.Scene{
ID: r.ID,
Title: r.Title.String,
Details: r.Details.String,
URL: r.URL.String,
Date: r.Date.DatePtr(),
Rating: nullIntPtr(r.Rating),
Organized: r.Organized,
OCounter: r.OCounter,
StudioID: nullIntPtr(r.StudioID),
CreatedAt: r.CreatedAt,
UpdatedAt: r.UpdatedAt,
}
r.appendRelationships(ret)
return ret
}
func movieAppendUnique(e []models.MoviesScenes, toAdd models.MoviesScenes) []models.MoviesScenes {
for _, ee := range e {
if ee.Equal(toAdd) {
return e
}
}
return append(e, toAdd)
}
func stashIDAppendUnique(e []models.StashID, toAdd models.StashID) []models.StashID {
for _, ee := range e {
if ee == toAdd {
return e
}
}
return append(e, toAdd)
}
func appendVideoFileUnique(vs []*file.VideoFile, toAdd *file.VideoFile, isPrimary bool) []*file.VideoFile {
// check in reverse, since it's most likely to be the last one
for i := len(vs) - 1; i >= 0; i-- {
if vs[i].Base().ID == toAdd.Base().ID {
// merge the two
mergeFiles(vs[i], toAdd)
return vs
}
}
if !isPrimary {
return append(vs, toAdd)
}
// primary should be first
return append([]*file.VideoFile{toAdd}, vs...)
}
func (r *sceneQueryRow) appendRelationships(i *models.Scene) {
if r.TagID.Valid {
i.TagIDs = intslice.IntAppendUnique(i.TagIDs, int(r.TagID.Int64))
}
if r.PerformerID.Valid {
i.PerformerIDs = intslice.IntAppendUnique(i.PerformerIDs, int(r.PerformerID.Int64))
}
if r.GalleryID.Valid {
i.GalleryIDs = intslice.IntAppendUnique(i.GalleryIDs, int(r.GalleryID.Int64))
}
if r.MovieID.Valid {
i.Movies = movieAppendUnique(i.Movies, models.MoviesScenes{
MovieID: int(r.MovieID.Int64),
SceneIndex: nullIntPtr(r.SceneIndex),
})
}
if r.StashID.Valid {
i.StashIDs = stashIDAppendUnique(i.StashIDs, models.StashID{
StashID: r.StashID.String,
Endpoint: r.Endpoint.String,
})
}
if r.relatedFileQueryRow.FileID.Valid {
f := r.fileQueryRow.resolve().(*file.VideoFile)
i.Files = appendVideoFileUnique(i.Files, f, r.Primary.Bool)
}
}
type sceneQueryRows []sceneQueryRow
func (r sceneQueryRows) resolve() []*models.Scene {
var ret []*models.Scene
var last *models.Scene
var lastID int
for _, row := range r {
if last == nil || lastID != row.ID {
f := row.resolve()
last = f
lastID = row.ID
ret = append(ret, last)
continue
}
// must be merging with previous row
row.appendRelationships(last)
}
return ret
}
type SceneStore struct {
repository
tableMgr *table
queryTableMgr *table
tableMgr *table
oCounterManager
fileStore *FileStore
}
func NewSceneStore() *SceneStore {
func NewSceneStore(fileStore *FileStore) *SceneStore {
return &SceneStore{
repository: repository{
tableName: sceneTable,
@@ -239,8 +132,8 @@ func NewSceneStore() *SceneStore {
},
tableMgr: sceneTableMgr,
queryTableMgr: sceneQueryTableMgr,
oCounterManager: oCounterManager{sceneTableMgr},
fileStore: fileStore,
}
}
@@ -248,10 +141,6 @@ func (qb *SceneStore) table() exp.IdentifierExpression {
return qb.tableMgr.table
}
func (qb *SceneStore) queryTable() exp.IdentifierExpression {
return qb.queryTableMgr.table
}
func (qb *SceneStore) Create(ctx context.Context, newObject *models.Scene, fileIDs []file.ID) error {
var r sceneRow
r.fromScene(*newObject)
@@ -392,25 +281,32 @@ func (qb *SceneStore) Find(ctx context.Context, id int) (*models.Scene, error) {
}
func (qb *SceneStore) FindMany(ctx context.Context, ids []int) ([]*models.Scene, error) {
var scenes []*models.Scene
for _, id := range ids {
scene, err := qb.Find(ctx, id)
if err != nil {
return nil, err
}
table := qb.table()
q := qb.selectDataset().Prepared(true).Where(table.Col(idColumn).In(ids))
unsorted, err := qb.getMany(ctx, q)
if err != nil {
return nil, err
}
if scene == nil {
return nil, fmt.Errorf("scene with id %d not found", id)
}
scenes := make([]*models.Scene, len(ids))
scenes = append(scenes, scene)
for _, s := range unsorted {
i := intslice.IntIndex(ids, s.ID)
scenes[i] = s
}
for i := range scenes {
if scenes[i] == nil {
return nil, fmt.Errorf("scene with id %d not found", ids[i])
}
}
return scenes, nil
}
func (qb *SceneStore) selectDataset() *goqu.SelectDataset {
return dialect.From(scenesQueryTable).Select(scenesQueryTable.All())
table := qb.table()
return dialect.From(table).Select(table.All())
}
func (qb *SceneStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.Scene, error) {
@@ -428,24 +324,127 @@ func (qb *SceneStore) get(ctx context.Context, q *goqu.SelectDataset) (*models.S
func (qb *SceneStore) getMany(ctx context.Context, q *goqu.SelectDataset) ([]*models.Scene, error) {
const single = false
var rows sceneQueryRows
var ret []*models.Scene
if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error {
var f sceneQueryRow
var f sceneRow
if err := r.StructScan(&f); err != nil {
return err
}
rows = append(rows, f)
s := f.resolve()
if err := qb.resolveRelationships(ctx, s); err != nil {
return err
}
ret = append(ret, s)
return nil
}); err != nil {
return nil, err
}
return rows.resolve(), nil
return ret, nil
}
func (qb *SceneStore) resolveRelationships(ctx context.Context, s *models.Scene) error {
var err error
// files
s.Files, err = qb.getFiles(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene files: %w", err)
}
// movies
s.Movies, err = qb.getMovies(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene movies: %w", err)
}
// performers
s.PerformerIDs, err = qb.performersRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene performers: %w", err)
}
// tags
s.TagIDs, err = qb.tagsRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene tags: %w", err)
}
// galleries
s.GalleryIDs, err = qb.galleriesRepository().getIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene galleries: %w", err)
}
// stash ids
s.StashIDs, err = qb.getStashIDs(ctx, s.ID)
if err != nil {
return fmt.Errorf("resolving scene stash ids: %w", err)
}
return nil
}
func (qb *SceneStore) getFiles(ctx context.Context, id int) ([]*file.VideoFile, error) {
fileIDs, err := qb.filesRepository().get(ctx, id)
if err != nil {
return nil, err
}
// use fileStore to load files
files, err := qb.fileStore.Find(ctx, fileIDs...)
if err != nil {
return nil, err
}
ret := make([]*file.VideoFile, len(files))
for i, f := range files {
var ok bool
ret[i], ok = f.(*file.VideoFile)
if !ok {
return nil, fmt.Errorf("expected file to be *file.VideoFile not %T", f)
}
}
return ret, nil
}
func (qb *SceneStore) getMovies(ctx context.Context, id int) (ret []models.MoviesScenes, err error) {
ret = []models.MoviesScenes{}
if err := qb.moviesRepository().getAll(ctx, id, func(rows *sqlx.Rows) error {
var ms moviesScenesRow
if err := rows.StructScan(&ms); err != nil {
return err
}
ret = append(ret, ms.resolve(id))
return nil
}); err != nil {
return nil, err
}
return ret, nil
}
func (qb *SceneStore) getStashIDs(ctx context.Context, id int) ([]models.StashID, error) {
stashIDs, err := qb.stashIDRepository().get(ctx, id)
if err != nil {
return nil, err
}
ret := make([]models.StashID, len(stashIDs))
for i, sid := range stashIDs {
ret[i] = *sid
}
return ret, nil
}
func (qb *SceneStore) find(ctx context.Context, id int) (*models.Scene, error) {
q := qb.selectDataset().Where(qb.queryTableMgr.byID(id))
q := qb.selectDataset().Where(qb.tableMgr.byID(id))
ret, err := qb.get(ctx, q)
if err != nil {
@@ -552,7 +551,7 @@ func (qb *SceneStore) FindByPath(ctx context.Context, p string) ([]*models.Scene
}
func (qb *SceneStore) findBySubquery(ctx context.Context, sq *goqu.SelectDataset) ([]*models.Scene, error) {
table := qb.queryTable()
table := qb.table()
q := qb.selectDataset().Where(
table.Col(idColumn).Eq(
@@ -706,16 +705,28 @@ func (qb *SceneStore) Wall(ctx context.Context, q *string) ([]*models.Scene, err
s = *q
}
table := qb.queryTable()
table := qb.table()
qq := qb.selectDataset().Prepared(true).Where(table.Col("details").Like("%" + s + "%")).Order(goqu.L("RANDOM()").Asc()).Limit(80)
return qb.getMany(ctx, qq)
}
func (qb *SceneStore) All(ctx context.Context) ([]*models.Scene, error) {
return qb.getMany(ctx, qb.selectDataset().Order(
qb.queryTable().Col("parent_folder_path").Asc(),
qb.queryTable().Col("basename").Asc(),
qb.queryTable().Col("date").Asc(),
table := qb.table()
fileTable := fileTableMgr.table
folderTable := folderTableMgr.table
return qb.getMany(ctx, qb.selectDataset().LeftJoin(
scenesFilesJoinTable,
goqu.On(scenesFilesJoinTable.Col(sceneIDColumn).Eq(table.Col(idColumn))),
).LeftJoin(
fileTable,
goqu.On(fileTable.Col(idColumn).Eq(scenesFilesJoinTable.Col(fileIDColumn))),
).LeftJoin(
folderTable,
goqu.On(folderTable.Col(idColumn).Eq(fileTable.Col("parent_folder_id"))),
).Order(
folderTable.Col("path").Asc(),
fileTable.Col("basename").Asc(),
table.Col("date").Asc(),
))
}
@@ -1388,6 +1399,16 @@ func (qb *SceneStore) moviesRepository() *repository {
}
}
func (qb *SceneStore) filesRepository() *filesRepository {
return &filesRepository{
repository: repository{
tx: qb.tx,
tableName: scenesFilesTable,
idColumn: sceneIDColumn,
},
}
}
func (qb *SceneStore) performersRepository() *joinRepository {
return &joinRepository{
repository: repository{

View File

@@ -83,6 +83,7 @@ func Test_sceneQueryBuilder_Create(t *testing.T) {
Endpoint: endpoint2,
},
},
Files: []*file.VideoFile{},
},
false,
},
@@ -300,6 +301,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithSpacedName),
},
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
},
false,
},
@@ -310,6 +316,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithGallery),
},
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
},
false,
},
@@ -320,6 +331,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithTag),
},
TagIDs: []int{},
GalleryIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
},
false,
},
@@ -330,6 +346,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithPerformer),
},
PerformerIDs: []int{},
TagIDs: []int{},
GalleryIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
},
false,
},
@@ -340,6 +361,11 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithMovie),
},
Movies: []models.MoviesScenes{},
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
StashIDs: []models.StashID{},
},
false,
},
@@ -350,7 +376,12 @@ func Test_sceneQueryBuilder_Update(t *testing.T) {
Files: []*file.VideoFile{
makeSceneFileWithID(sceneIdxWithGallery),
},
StudioID: &invalidID,
StudioID: &invalidID,
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
},
true,
},
@@ -575,6 +606,11 @@ func Test_sceneQueryBuilder_UpdatePartial(t *testing.T) {
Files: []*file.VideoFile{
makeSceneFile(sceneIdxWithSpacedName),
},
GalleryIDs: []int{},
TagIDs: []int{},
PerformerIDs: []int{},
Movies: []models.MoviesScenes{},
StashIDs: []models.StashID{},
},
false,
},
@@ -738,7 +774,7 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) {
},
},
models.Scene{
StashIDs: append(stashIDs, []models.StashID{sceneStashID(sceneIdxWithSpacedName)}...),
StashIDs: append([]models.StashID{sceneStashID(sceneIdxWithSpacedName)}, stashIDs...),
},
false,
},
@@ -892,7 +928,9 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove,
},
},
models.Scene{},
models.Scene{
GalleryIDs: []int{},
},
false,
},
{
@@ -936,7 +974,9 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove,
},
},
models.Scene{},
models.Scene{
Movies: []models.MoviesScenes{},
},
false,
},
{
@@ -948,7 +988,9 @@ func Test_sceneQueryBuilder_UpdatePartialRelationships(t *testing.T) {
Mode: models.RelationshipUpdateModeRemove,
},
},
models.Scene{},
models.Scene{
StashIDs: []models.StashID{},
},
false,
},
{

View File

@@ -458,10 +458,6 @@ var (
)
func indexesToIDs(ids []int, indexes []int) []int {
if len(indexes) == 0 {
return nil
}
ret := make([]int, len(indexes))
for i, idx := range indexes {
ret[i] = ids[idx]
@@ -964,13 +960,10 @@ func makeScene(i int) *models.Scene {
mids := indexesToIDs(movieIDs, sceneMovies[i])
var movies []models.MoviesScenes
if len(mids) > 0 {
movies = make([]models.MoviesScenes, len(mids))
for i, m := range mids {
movies[i] = models.MoviesScenes{
MovieID: m,
}
movies := make([]models.MoviesScenes, len(mids))
for i, m := range mids {
movies[i] = models.MoviesScenes{
MovieID: m,
}
}

View File

@@ -361,6 +361,7 @@ type scenesMoviesTable struct {
}
type moviesScenesRow struct {
SceneID null.Int `db:"scene_id"`
MovieID null.Int `db:"movie_id"`
SceneIndex null.Int `db:"scene_index"`
}
@@ -552,12 +553,8 @@ func queryFunc(ctx context.Context, query *goqu.SelectDataset, single bool, f fu
return err
}
tx, err := getDBReader(ctx)
if err != nil {
return err
}
rows, err := tx.QueryxContext(ctx, q, args...)
wrapper := dbWrapper{}
rows, err := wrapper.QueryxContext(ctx, q, args...)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return fmt.Errorf("querying `%s` [%v]: %w", q, args, err)
@@ -586,12 +583,8 @@ func querySimple(ctx context.Context, query *goqu.SelectDataset, out interface{}
return err
}
tx, err := getDBReader(ctx)
if err != nil {
return err
}
rows, err := tx.QueryxContext(ctx, q, args...)
wrapper := dbWrapper{}
rows, err := wrapper.QueryxContext(ctx, q, args...)
if err != nil {
return fmt.Errorf("querying `%s` [%v]: %w", q, args, err)
}

View File

@@ -13,9 +13,6 @@ var (
imagesTagsJoinTable = goqu.T(imagesTagsTable)
performersImagesJoinTable = goqu.T(performersImagesTable)
imagesFilesJoinTable = goqu.T(imagesFilesTable)
imagesQueryTable = goqu.T("images_query")
galleriesQueryTable = goqu.T("galleries_query")
scenesQueryTable = goqu.T("scenes_query")
galleriesFilesJoinTable = goqu.T(galleriesFilesTable)
galleriesTagsJoinTable = goqu.T(galleriesTagsTable)
@@ -35,11 +32,6 @@ var (
idColumn: goqu.T(imageTable).Col(idColumn),
}
imageQueryTableMgr = &table{
table: imagesQueryTable,
idColumn: imagesQueryTable.Col(idColumn),
}
imagesFilesTableMgr = &relatedFilesTable{
table: table{
table: imagesFilesJoinTable,
@@ -78,11 +70,6 @@ var (
idColumn: goqu.T(galleryTable).Col(idColumn),
}
galleryQueryTableMgr = &table{
table: galleriesQueryTable,
idColumn: galleriesQueryTable.Col(idColumn),
}
galleriesFilesTableMgr = &relatedFilesTable{
table: table{
table: galleriesFilesJoinTable,
@@ -121,11 +108,6 @@ var (
idColumn: goqu.T(sceneTable).Col(idColumn),
}
sceneQueryTableMgr = &table{
table: scenesQueryTable,
idColumn: scenesQueryTable.Col(idColumn),
}
scenesFilesTableMgr = &relatedFilesTable{
table: table{
table: scenesFilesJoinTable,

View File

@@ -2,10 +2,12 @@ package sqlite
import (
"context"
"errors"
"fmt"
"runtime/debug"
"github.com/jmoiron/sqlx"
"github.com/mattn/go-sqlite3"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/models"
)
@@ -100,6 +102,14 @@ func getDBReader(ctx context.Context) (dbReader, error) {
return tx, nil
}
func (db *Database) IsLocked(err error) bool {
var sqliteError sqlite3.Error
if errors.As(err, &sqliteError) {
return sqliteError.Code == sqlite3.ErrBusy
}
return false
}
func (db *Database) TxnRepository() models.Repository {
return models.Repository{
TxnManager: db,

View File

@@ -3,6 +3,7 @@ package sqlite
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/jmoiron/sqlx"
@@ -31,67 +32,88 @@ func logSQL(start time.Time, query string, args ...interface{}) {
type dbWrapper struct{}
func sqlError(err error, sql string, args ...interface{}) error {
if err == nil {
return nil
}
return fmt.Errorf("error executing `%s` [%v]: %w", sql, args, err)
}
func (*dbWrapper) Get(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
tx, err := getDBReader(ctx)
if err != nil {
return err
return sqlError(err, query, args...)
}
start := time.Now()
err = tx.Get(dest, query, args...)
logSQL(start, query, args...)
return err
return sqlError(err, query, args...)
}
func (*dbWrapper) Select(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
tx, err := getDBReader(ctx)
if err != nil {
return err
return sqlError(err, query, args...)
}
start := time.Now()
err = tx.Select(dest, query, args...)
logSQL(start, query, args...)
return err
return sqlError(err, query, args...)
}
func (*dbWrapper) Queryx(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) {
tx, err := getDBReader(ctx)
if err != nil {
return nil, err
return nil, sqlError(err, query, args...)
}
start := time.Now()
ret, err := tx.Queryx(query, args...)
logSQL(start, query, args...)
return ret, err
return ret, sqlError(err, query, args...)
}
func (*dbWrapper) QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) {
tx, err := getDBReader(ctx)
if err != nil {
return nil, sqlError(err, query, args...)
}
start := time.Now()
ret, err := tx.QueryxContext(ctx, query, args...)
logSQL(start, query, args...)
return ret, sqlError(err, query, args...)
}
func (*dbWrapper) NamedExec(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
tx, err := getTx(ctx)
if err != nil {
return nil, err
return nil, sqlError(err, query, arg)
}
start := time.Now()
ret, err := tx.NamedExec(query, arg)
logSQL(start, query, arg)
return ret, err
return ret, sqlError(err, query, arg)
}
func (*dbWrapper) Exec(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
tx, err := getTx(ctx)
if err != nil {
return nil, err
return nil, sqlError(err, query, args...)
}
start := time.Now()
ret, err := tx.Exec(query, args...)
logSQL(start, query, args...)
return ret, err
return ret, sqlError(err, query, args...)
}

View File

@@ -1,12 +1,17 @@
package txn
import "context"
import (
"context"
"fmt"
)
type Manager interface {
Begin(ctx context.Context) (context.Context, error)
Commit(ctx context.Context) error
Rollback(ctx context.Context) error
IsLocked(err error) bool
AddPostCommitHook(ctx context.Context, hook TxnFunc)
AddPostRollbackHook(ctx context.Context, hook TxnFunc)
}
@@ -58,3 +63,33 @@ func WithDatabase(ctx context.Context, p DatabaseProvider, fn TxnFunc) error {
return fn(ctx)
}
type Retryer struct {
Manager Manager
Retries int
OnFail func(ctx context.Context, err error, attempt int) error
}
func (r Retryer) WithTxn(ctx context.Context, fn TxnFunc) error {
var attempt int
var err error
for attempt = 1; attempt <= r.Retries; attempt++ {
err = WithTxn(ctx, r.Manager, fn)
if err == nil {
return nil
}
if !r.Manager.IsLocked(err) {
return err
}
if r.OnFail != nil {
if err := r.OnFail(ctx, err, attempt); err != nil {
return err
}
}
}
return fmt.Errorf("failed after %d attempts: %w", attempt, err)
}
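For context, a minimal sketch of how a caller might wire this Retryer around a txn.Manager, as the scanner does above with maxRetries. The backoff in OnFail is an assumption made for this sketch; the commit itself does not add any delay between attempts.

// Illustrative usage of txn.Retryer (not part of the diff above).
// Assumes db satisfies txn.Manager and reports a locked database via IsLocked.
package main

import (
	"context"
	"time"

	"github.com/stashapp/stash/pkg/txn"
)

func writeWithRetry(ctx context.Context, db txn.Manager) error {
	retryer := txn.Retryer{
		Manager: db,
		Retries: 1000,
		// OnFail is optional; sleeping between attempts is an assumption
		// for this sketch, not behaviour added by the commit.
		OnFail: func(ctx context.Context, err error, attempt int) error {
			select {
			case <-time.After(time.Duration(attempt) * time.Millisecond):
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		},
	}

	return retryer.WithTxn(ctx, func(ctx context.Context) error {
		// database writes go here; the function is re-run on each retry
		return nil
	})
}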