[Files Refactor] Performance tuning (#2865)

* Don't load image files by default
* Don't load gallery files by default
* Don't load scene files by default
* Retry locked transactions forever
* Don't show release notes if config not loaded
* Don't translate path slashes in export
This commit is contained in:
WithoutPants
2022-09-01 17:54:34 +10:00
parent 0b534d89c6
commit 273cf0383d
94 changed files with 2611 additions and 981 deletions

View File

@@ -5,6 +5,10 @@
//go:generate go run -mod=vendor github.com/vektah/dataloaden StudioLoader int *github.com/stashapp/stash/pkg/models.Studio
//go:generate go run -mod=vendor github.com/vektah/dataloaden TagLoader int *github.com/stashapp/stash/pkg/models.Tag
//go:generate go run -mod=vendor github.com/vektah/dataloaden MovieLoader int *github.com/stashapp/stash/pkg/models.Movie
//go:generate go run -mod=vendor github.com/vektah/dataloaden FileLoader github.com/stashapp/stash/pkg/file.ID github.com/stashapp/stash/pkg/file.File
//go:generate go run -mod=vendor github.com/vektah/dataloaden SceneFileIDsLoader int []github.com/stashapp/stash/pkg/file.ID
//go:generate go run -mod=vendor github.com/vektah/dataloaden ImageFileIDsLoader int []github.com/stashapp/stash/pkg/file.ID
//go:generate go run -mod=vendor github.com/vektah/dataloaden GalleryFileIDsLoader int []github.com/stashapp/stash/pkg/file.ID
package loaders
@@ -14,6 +18,7 @@ import (
"time"
"github.com/stashapp/stash/internal/manager"
"github.com/stashapp/stash/pkg/file"
"github.com/stashapp/stash/pkg/models"
"github.com/stashapp/stash/pkg/txn"
)
@@ -30,13 +35,18 @@ const (
)
type Loaders struct {
SceneByID *SceneLoader
SceneByID *SceneLoader
SceneFiles *SceneFileIDsLoader
ImageFiles *ImageFileIDsLoader
GalleryFiles *GalleryFileIDsLoader
GalleryByID *GalleryLoader
ImageByID *ImageLoader
PerformerByID *PerformerLoader
StudioByID *StudioLoader
TagByID *TagLoader
MovieByID *MovieLoader
FileByID *FileLoader
}
type Middleware struct {
@@ -83,6 +93,26 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
maxBatch: maxBatch,
fetch: m.fetchMovies(ctx),
},
FileByID: &FileLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchFiles(ctx),
},
SceneFiles: &SceneFileIDsLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchScenesFileIDs(ctx),
},
ImageFiles: &ImageFileIDsLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchImagesFileIDs(ctx),
},
GalleryFiles: &GalleryFileIDsLoader{
wait: wait,
maxBatch: maxBatch,
fetch: m.fetchGalleriesFileIDs(ctx),
},
}
newCtx := context.WithValue(r.Context(), loadersCtxKey, ldrs)
@@ -185,3 +215,47 @@ func (m Middleware) fetchMovies(ctx context.Context) func(keys []int) ([]*models
return ret, toErrorSlice(err)
}
}
// fetchFiles builds the batch function used by the FileLoader: it resolves a
// set of file IDs to file.File values within a single read transaction.
// A transaction-level failure is fanned out to every key via toErrorSlice.
func (m Middleware) fetchFiles(ctx context.Context) func(keys []file.ID) ([]file.File, []error) {
	return func(keys []file.ID) ([]file.File, []error) {
		var result []file.File
		txnErr := m.withTxn(ctx, func(ctx context.Context) error {
			found, findErr := m.Repository.File.Find(ctx, keys...)
			result = found
			return findErr
		})
		return result, toErrorSlice(txnErr)
	}
}
// fetchScenesFileIDs builds the batch function used by the SceneFileIDsLoader:
// it resolves scene IDs to their associated file ID lists within a single
// read transaction. A transaction-level failure is fanned out to every key.
func (m Middleware) fetchScenesFileIDs(ctx context.Context) func(keys []int) ([][]file.ID, []error) {
	return func(keys []int) ([][]file.ID, []error) {
		var result [][]file.ID
		txnErr := m.withTxn(ctx, func(ctx context.Context) error {
			found, findErr := m.Repository.Scene.GetManyFileIDs(ctx, keys)
			result = found
			return findErr
		})
		return result, toErrorSlice(txnErr)
	}
}
// fetchImagesFileIDs builds the batch function used by the ImageFileIDsLoader:
// it resolves image IDs to their associated file ID lists within a single
// read transaction. A transaction-level failure is fanned out to every key.
func (m Middleware) fetchImagesFileIDs(ctx context.Context) func(keys []int) ([][]file.ID, []error) {
	return func(keys []int) ([][]file.ID, []error) {
		var result [][]file.ID
		txnErr := m.withTxn(ctx, func(ctx context.Context) error {
			found, findErr := m.Repository.Image.GetManyFileIDs(ctx, keys)
			result = found
			return findErr
		})
		return result, toErrorSlice(txnErr)
	}
}
// fetchGalleriesFileIDs builds the batch function used by the
// GalleryFileIDsLoader: it resolves gallery IDs to their associated file ID
// lists within a single read transaction. A transaction-level failure is
// fanned out to every key.
func (m Middleware) fetchGalleriesFileIDs(ctx context.Context) func(keys []int) ([][]file.ID, []error) {
	return func(keys []int) ([][]file.ID, []error) {
		var result [][]file.ID
		txnErr := m.withTxn(ctx, func(ctx context.Context) error {
			found, findErr := m.Repository.Gallery.GetManyFileIDs(ctx, keys)
			result = found
			return findErr
		})
		return result, toErrorSlice(txnErr)
	}
}

View File

@@ -0,0 +1,221 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"github.com/stashapp/stash/pkg/file"
)
// FileLoaderConfig captures the config to create a new FileLoader
type FileLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []file.ID) ([]file.File, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewFileLoader creates a new FileLoader given a fetch, wait, and maxBatch
func NewFileLoader(config FileLoaderConfig) *FileLoader {
	return &FileLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// FileLoader batches and caches requests
type FileLoader struct {
	// this method provides the data for the loader
	fetch func(keys []file.ID) ([]file.File, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[file.ID]file.File

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *fileLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}
// fileLoaderBatch accumulates the keys requested during one batching window.
type fileLoaderBatch struct {
	keys    []file.ID
	data    []file.File   // fetch results, positionally aligned with keys
	error   []error       // either one shared error, or one error per key
	closing bool          // set once the batch has been scheduled for dispatch
	done    chan struct{} // closed when data/error have been populated
}

// Load a File by key, batching and caching will be applied automatically
func (l *FileLoader) Load(key file.ID) (file.File, error) {
	return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a File.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *FileLoader) LoadThunk(key file.ID) func() (file.File, error) {
	l.mu.Lock()
	// cache hit: return the value immediately, no batching required
	if it, ok := l.cache[key]; ok {
		l.mu.Unlock()
		return func() (file.File, error) {
			return it, nil
		}
	}
	// lazily start a new batch; keyIndex schedules its dispatch
	if l.batch == nil {
		l.batch = &fileLoaderBatch{done: make(chan struct{})}
	}
	batch := l.batch
	pos := batch.keyIndex(l, key)
	l.mu.Unlock()

	return func() (file.File, error) {
		// block until the batch has been fetched
		<-batch.done

		var data file.File
		if pos < len(batch.data) {
			data = batch.data[pos]
		}

		var err error
		// its convenient to be able to return a single error for everything
		if len(batch.error) == 1 {
			err = batch.error[0]
		} else if batch.error != nil {
			err = batch.error[pos]
		}

		// only successful results are cached
		if err == nil {
			l.mu.Lock()
			l.unsafeSet(key, data)
			l.mu.Unlock()
		}

		return data, err
	}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *FileLoader) LoadAll(keys []file.ID) ([]file.File, []error) {
	// queue every key first so they can share batches...
	results := make([]func() (file.File, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}

	// ...then block on each thunk to collect the results in order
	files := make([]file.File, len(keys))
	errors := make([]error, len(keys))
	for i, thunk := range results {
		files[i], errors[i] = thunk()
	}
	return files, errors
}

// LoadAllThunk returns a function that when called will block waiting for a Files.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *FileLoader) LoadAllThunk(keys []file.ID) func() ([]file.File, []error) {
	results := make([]func() (file.File, error), len(keys))
	for i, key := range keys {
		results[i] = l.LoadThunk(key)
	}
	return func() ([]file.File, []error) {
		files := make([]file.File, len(keys))
		errors := make([]error, len(keys))
		for i, thunk := range results {
			files[i], errors[i] = thunk()
		}
		return files, errors
	}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *FileLoader) Prime(key file.ID, value file.File) bool {
	l.mu.Lock()
	var found bool
	if _, found = l.cache[key]; !found {
		l.unsafeSet(key, value)
	}
	l.mu.Unlock()
	return !found
}

// Clear the value at key from the cache, if it exists
func (l *FileLoader) Clear(key file.ID) {
	l.mu.Lock()
	delete(l.cache, key)
	l.mu.Unlock()
}

// unsafeSet writes to the cache without locking; callers must hold l.mu.
func (l *FileLoader) unsafeSet(key file.ID, value file.File) {
	if l.cache == nil {
		l.cache = map[file.ID]file.File{}
	}
	l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch.
// Must be called with l.mu held.
func (b *fileLoaderBatch) keyIndex(l *FileLoader, key file.ID) int {
	// duplicate keys within one batch share a single slot
	for i, existingKey := range b.keys {
		if key == existingKey {
			return i
		}
	}

	pos := len(b.keys)
	b.keys = append(b.keys, key)
	// the first key starts the wait timer for this batch
	if pos == 0 {
		go b.startTimer(l)
	}

	// batch is full: detach it from the loader and dispatch immediately
	if l.maxBatch != 0 && pos >= l.maxBatch-1 {
		if !b.closing {
			b.closing = true
			l.batch = nil
			go b.end(l)
		}
	}

	return pos
}
// startTimer dispatches the batch after the configured wait, unless the batch
// was already closed early by reaching the maxBatch limit.
func (b *fileLoaderBatch) startTimer(l *FileLoader) {
	time.Sleep(l.wait)
	l.mu.Lock()

	// we must have hit a batch limit and are already finalizing this batch
	if b.closing {
		l.mu.Unlock()
		return
	}

	// detach so new keys start a fresh batch, then dispatch this one
	l.batch = nil
	l.mu.Unlock()

	b.end(l)
}

// end runs the fetch and signals every waiting thunk via the done channel.
func (b *fileLoaderBatch) end(l *FileLoader) {
	b.data, b.error = l.fetch(b.keys)
	close(b.done)
}

View File

@@ -0,0 +1,225 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"github.com/stashapp/stash/pkg/file"
)
// GalleryFileIDsLoaderConfig captures the config to create a new GalleryFileIDsLoader
type GalleryFileIDsLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([][]file.ID, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewGalleryFileIDsLoader creates a new GalleryFileIDsLoader given a fetch, wait, and maxBatch
func NewGalleryFileIDsLoader(config GalleryFileIDsLoaderConfig) *GalleryFileIDsLoader {
	return &GalleryFileIDsLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// GalleryFileIDsLoader batches and caches requests
type GalleryFileIDsLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([][]file.ID, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int][]file.ID

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *galleryFileIDsLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}
type galleryFileIDsLoaderBatch struct {
keys []int
data [][]file.ID
error []error
closing bool
done chan struct{}
}
// Load a ID by key, batching and caching will be applied automatically
func (l *GalleryFileIDsLoader) Load(key int) ([]file.ID, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a ID.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *GalleryFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() ([]file.ID, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &galleryFileIDsLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() ([]file.ID, error) {
<-batch.done
var data []file.ID
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *GalleryFileIDsLoader) LoadAll(keys []int) ([][]file.ID, []error) {
results := make([]func() ([]file.ID, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
iDs := make([][]file.ID, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
iDs[i], errors[i] = thunk()
}
return iDs, errors
}
// LoadAllThunk returns a function that when called will block waiting for a IDs.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *GalleryFileIDsLoader) LoadAllThunk(keys []int) func() ([][]file.ID, []error) {
results := make([]func() ([]file.ID, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([][]file.ID, []error) {
iDs := make([][]file.ID, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
iDs[i], errors[i] = thunk()
}
return iDs, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *GalleryFileIDsLoader) Prime(key int, value []file.ID) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := make([]file.ID, len(value))
copy(cpy, value)
l.unsafeSet(key, cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *GalleryFileIDsLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *GalleryFileIDsLoader) unsafeSet(key int, value []file.ID) {
if l.cache == nil {
l.cache = map[int][]file.ID{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *galleryFileIDsLoaderBatch) keyIndex(l *GalleryFileIDsLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *galleryFileIDsLoaderBatch) startTimer(l *GalleryFileIDsLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *galleryFileIDsLoaderBatch) end(l *GalleryFileIDsLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}

View File

@@ -0,0 +1,225 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"github.com/stashapp/stash/pkg/file"
)
// ImageFileIDsLoaderConfig captures the config to create a new ImageFileIDsLoader
type ImageFileIDsLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([][]file.ID, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewImageFileIDsLoader creates a new ImageFileIDsLoader given a fetch, wait, and maxBatch
func NewImageFileIDsLoader(config ImageFileIDsLoaderConfig) *ImageFileIDsLoader {
	return &ImageFileIDsLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// ImageFileIDsLoader batches and caches requests
type ImageFileIDsLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([][]file.ID, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int][]file.ID

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *imageFileIDsLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}
type imageFileIDsLoaderBatch struct {
keys []int
data [][]file.ID
error []error
closing bool
done chan struct{}
}
// Load a ID by key, batching and caching will be applied automatically
func (l *ImageFileIDsLoader) Load(key int) ([]file.ID, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a ID.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ImageFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() ([]file.ID, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &imageFileIDsLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() ([]file.ID, error) {
<-batch.done
var data []file.ID
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *ImageFileIDsLoader) LoadAll(keys []int) ([][]file.ID, []error) {
results := make([]func() ([]file.ID, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
iDs := make([][]file.ID, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
iDs[i], errors[i] = thunk()
}
return iDs, errors
}
// LoadAllThunk returns a function that when called will block waiting for a IDs.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *ImageFileIDsLoader) LoadAllThunk(keys []int) func() ([][]file.ID, []error) {
results := make([]func() ([]file.ID, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([][]file.ID, []error) {
iDs := make([][]file.ID, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
iDs[i], errors[i] = thunk()
}
return iDs, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *ImageFileIDsLoader) Prime(key int, value []file.ID) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := make([]file.ID, len(value))
copy(cpy, value)
l.unsafeSet(key, cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *ImageFileIDsLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *ImageFileIDsLoader) unsafeSet(key int, value []file.ID) {
if l.cache == nil {
l.cache = map[int][]file.ID{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *imageFileIDsLoaderBatch) keyIndex(l *ImageFileIDsLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *imageFileIDsLoaderBatch) startTimer(l *ImageFileIDsLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *imageFileIDsLoaderBatch) end(l *ImageFileIDsLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}

View File

@@ -0,0 +1,225 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package loaders
import (
"sync"
"time"
"github.com/stashapp/stash/pkg/file"
)
// SceneFileIDsLoaderConfig captures the config to create a new SceneFileIDsLoader
type SceneFileIDsLoaderConfig struct {
	// Fetch is a method that provides the data for the loader
	Fetch func(keys []int) ([][]file.ID, []error)

	// Wait is how long to wait before sending a batch
	Wait time.Duration

	// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
	MaxBatch int
}

// NewSceneFileIDsLoader creates a new SceneFileIDsLoader given a fetch, wait, and maxBatch
func NewSceneFileIDsLoader(config SceneFileIDsLoaderConfig) *SceneFileIDsLoader {
	return &SceneFileIDsLoader{
		fetch:    config.Fetch,
		wait:     config.Wait,
		maxBatch: config.MaxBatch,
	}
}

// SceneFileIDsLoader batches and caches requests
type SceneFileIDsLoader struct {
	// this method provides the data for the loader
	fetch func(keys []int) ([][]file.ID, []error)

	// how long to wait before sending a batch
	wait time.Duration

	// this will limit the maximum number of keys to send in one batch, 0 = no limit
	maxBatch int

	// INTERNAL

	// lazily created cache
	cache map[int][]file.ID

	// the current batch. keys will continue to be collected until timeout is hit,
	// then everything will be sent to the fetch method and out to the listeners
	batch *sceneFileIDsLoaderBatch

	// mutex to prevent races
	mu sync.Mutex
}
type sceneFileIDsLoaderBatch struct {
keys []int
data [][]file.ID
error []error
closing bool
done chan struct{}
}
// Load a ID by key, batching and caching will be applied automatically
func (l *SceneFileIDsLoader) Load(key int) ([]file.ID, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a ID.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SceneFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() ([]file.ID, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &sceneFileIDsLoaderBatch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() ([]file.ID, error) {
<-batch.done
var data []file.ID
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *SceneFileIDsLoader) LoadAll(keys []int) ([][]file.ID, []error) {
results := make([]func() ([]file.ID, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
iDs := make([][]file.ID, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
iDs[i], errors[i] = thunk()
}
return iDs, errors
}
// LoadAllThunk returns a function that when called will block waiting for a IDs.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *SceneFileIDsLoader) LoadAllThunk(keys []int) func() ([][]file.ID, []error) {
results := make([]func() ([]file.ID, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([][]file.ID, []error) {
iDs := make([][]file.ID, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
iDs[i], errors[i] = thunk()
}
return iDs, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *SceneFileIDsLoader) Prime(key int, value []file.ID) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := make([]file.ID, len(value))
copy(cpy, value)
l.unsafeSet(key, cpy)
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *SceneFileIDsLoader) Clear(key int) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *SceneFileIDsLoader) unsafeSet(key int, value []file.ID) {
if l.cache == nil {
l.cache = map[int][]file.ID{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *sceneFileIDsLoaderBatch) keyIndex(l *SceneFileIDsLoader, key int) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *sceneFileIDsLoaderBatch) startTimer(l *SceneFileIDsLoader) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *sceneFileIDsLoaderBatch) end(l *SceneFileIDsLoader) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}

View File

@@ -12,10 +12,38 @@ import (
"github.com/stashapp/stash/pkg/models"
)
func (r *galleryResolver) Files(ctx context.Context, obj *models.Gallery) ([]*GalleryFile, error) {
ret := make([]*GalleryFile, len(obj.Files))
func (r *galleryResolver) getPrimaryFile(ctx context.Context, obj *models.Gallery) (file.File, error) {
if obj.PrimaryFileID != nil {
f, err := loaders.From(ctx).FileByID.Load(*obj.PrimaryFileID)
if err != nil {
return nil, err
}
for i, f := range obj.Files {
return f, nil
}
return nil, nil
}
// getFiles loads the gallery's file IDs via the per-request dataloader, then
// resolves each ID to a file.File (also batched and cached per request).
func (r *galleryResolver) getFiles(ctx context.Context, obj *models.Gallery) ([]file.File, error) {
	fileIDs, err := loaders.From(ctx).GalleryFiles.Load(obj.ID)
	if err != nil {
		return nil, err
	}

	// NOTE(review): when errs is non-nil, files may contain zero values;
	// callers must check the returned error before using the slice.
	files, errs := loaders.From(ctx).FileByID.LoadAll(fileIDs)
	return files, firstError(errs)
}
func (r *galleryResolver) Files(ctx context.Context, obj *models.Gallery) ([]*GalleryFile, error) {
files, err := r.getFiles(ctx, obj)
if err != nil {
return nil, err
}
ret := make([]*GalleryFile, len(files))
for i, f := range files {
base := f.Base()
ret[i] = &GalleryFile{
ID: strconv.Itoa(int(base.ID)),
@@ -84,7 +112,10 @@ func (r *galleryResolver) Folder(ctx context.Context, obj *models.Gallery) (*Fol
}
func (r *galleryResolver) FileModTime(ctx context.Context, obj *models.Gallery) (*time.Time, error) {
f := obj.PrimaryFile()
f, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
if f != nil {
return &f.Base().ModTime, nil
}

View File

@@ -2,21 +2,69 @@ package api
import (
"context"
"fmt"
"strconv"
"time"
"github.com/stashapp/stash/internal/api/loaders"
"github.com/stashapp/stash/internal/api/urlbuilders"
"github.com/stashapp/stash/pkg/file"
"github.com/stashapp/stash/pkg/models"
)
// getPrimaryFile resolves the image's primary file via the per-request
// dataloader. Returns (nil, nil) when the image has no primary file.
func (r *imageResolver) getPrimaryFile(ctx context.Context, obj *models.Image) (*file.ImageFile, error) {
	if obj.PrimaryFileID == nil {
		return nil, nil
	}

	loaded, err := loaders.From(ctx).FileByID.Load(*obj.PrimaryFileID)
	if err != nil {
		return nil, err
	}

	imageFile, ok := loaded.(*file.ImageFile)
	if !ok {
		return nil, fmt.Errorf("file %T is not an image file", loaded)
	}
	return imageFile, nil
}
// getFiles loads the image's file IDs via the per-request dataloader,
// resolves each ID to a file, and narrows them to *file.ImageFile.
// Returns an error if any load fails or a file is not an image file.
func (r *imageResolver) getFiles(ctx context.Context, obj *models.Image) ([]*file.ImageFile, error) {
	fileIDs, err := loaders.From(ctx).ImageFiles.Load(obj.ID)
	if err != nil {
		return nil, err
	}

	files, errs := loaders.From(ctx).FileByID.LoadAll(fileIDs)
	// surface load failures before converting: on error some entries of files
	// are zero values, and converting them would mask the real error with a
	// bogus type error
	if err := firstError(errs); err != nil {
		return nil, err
	}

	ret := make([]*file.ImageFile, len(files))
	for i, bf := range files {
		f, ok := bf.(*file.ImageFile)
		if !ok {
			// report the actual loaded value's type, not the nil result of
			// the failed assertion
			return nil, fmt.Errorf("file %T is not an image file", bf)
		}
		ret[i] = f
	}

	return ret, nil
}
// Title resolves the image's display title.
func (r *imageResolver) Title(ctx context.Context, obj *models.Image) (*string, error) {
	title := obj.GetTitle()
	return &title, nil
}
func (r *imageResolver) File(ctx context.Context, obj *models.Image) (*ImageFileType, error) {
f := obj.PrimaryFile()
f, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
if f == nil {
return nil, nil
}
width := f.Width
height := f.Height
size := f.Size
@@ -28,9 +76,14 @@ func (r *imageResolver) File(ctx context.Context, obj *models.Image) (*ImageFile
}
func (r *imageResolver) Files(ctx context.Context, obj *models.Image) ([]*ImageFile, error) {
ret := make([]*ImageFile, len(obj.Files))
files, err := r.getFiles(ctx, obj)
if err != nil {
return nil, err
}
for i, f := range obj.Files {
ret := make([]*ImageFile, len(files))
for i, f := range files {
ret[i] = &ImageFile{
ID: strconv.Itoa(int(f.ID)),
Path: f.Path,
@@ -55,7 +108,10 @@ func (r *imageResolver) Files(ctx context.Context, obj *models.Image) ([]*ImageF
}
func (r *imageResolver) FileModTime(ctx context.Context, obj *models.Image) (*time.Time, error) {
f := obj.PrimaryFile()
f, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
if f != nil {
return &f.ModTime, nil
}

View File

@@ -14,9 +14,56 @@ import (
"github.com/stashapp/stash/pkg/utils"
)
// getPrimaryFile resolves the scene's primary file via the per-request
// dataloader and caches it on the scene for reuse by later resolvers.
// Returns (nil, nil) when the scene has no primary file.
func (r *sceneResolver) getPrimaryFile(ctx context.Context, obj *models.Scene) (*file.VideoFile, error) {
	if obj.PrimaryFileID == nil {
		return nil, nil
	}

	f, err := loaders.From(ctx).FileByID.Load(*obj.PrimaryFileID)
	if err != nil {
		return nil, err
	}

	ret, ok := f.(*file.VideoFile)
	if !ok {
		// a scene's primary file must be a video file (message previously
		// said "image file" — copy/paste error)
		return nil, fmt.Errorf("file %T is not a video file", f)
	}

	// cache the primary file on the scene so later resolvers can reuse it
	obj.Files.SetPrimary(ret)
	return ret, nil
}
// getFiles loads the scene's file IDs via the per-request dataloader,
// resolves each ID to a file, narrows them to *file.VideoFile, and caches
// the result on the scene for reuse by later resolvers.
func (r *sceneResolver) getFiles(ctx context.Context, obj *models.Scene) ([]*file.VideoFile, error) {
	fileIDs, err := loaders.From(ctx).SceneFiles.Load(obj.ID)
	if err != nil {
		return nil, err
	}

	files, errs := loaders.From(ctx).FileByID.LoadAll(fileIDs)
	// surface load failures before converting: on error some entries of files
	// are zero values, and converting them would mask the real error with a
	// bogus type error
	if err := firstError(errs); err != nil {
		return nil, err
	}

	ret := make([]*file.VideoFile, len(files))
	for i, bf := range files {
		f, ok := bf.(*file.VideoFile)
		if !ok {
			// report the actual loaded value's type, not the nil result of
			// the failed assertion
			return nil, fmt.Errorf("file %T is not a video file", bf)
		}
		ret[i] = f
	}

	// cache the resolved files on the scene so later resolvers can reuse them
	obj.Files.Set(ret)
	return ret, nil
}
func (r *sceneResolver) FileModTime(ctx context.Context, obj *models.Scene) (*time.Time, error) {
if obj.PrimaryFile() != nil {
return &obj.PrimaryFile().ModTime, nil
f, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
if f != nil {
return &f.ModTime, nil
}
return nil, nil
}
@@ -31,7 +78,10 @@ func (r *sceneResolver) Date(ctx context.Context, obj *models.Scene) (*string, e
// File is deprecated
func (r *sceneResolver) File(ctx context.Context, obj *models.Scene) (*models.SceneFileType, error) {
f := obj.PrimaryFile()
f, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
if f == nil {
return nil, nil
}
@@ -52,9 +102,14 @@ func (r *sceneResolver) File(ctx context.Context, obj *models.Scene) (*models.Sc
}
func (r *sceneResolver) Files(ctx context.Context, obj *models.Scene) ([]*VideoFile, error) {
ret := make([]*VideoFile, len(obj.Files))
files, err := r.getFiles(ctx, obj)
if err != nil {
return nil, err
}
for i, f := range obj.Files {
ret := make([]*VideoFile, len(files))
for i, f := range files {
ret[i] = &VideoFile{
ID: strconv.Itoa(int(f.ID)),
Path: f.Path,
@@ -148,7 +203,10 @@ func (r *sceneResolver) SceneMarkers(ctx context.Context, obj *models.Scene) (re
}
func (r *sceneResolver) Captions(ctx context.Context, obj *models.Scene) (ret []*models.VideoCaption, err error) {
primaryFile := obj.PrimaryFile()
primaryFile, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
if primaryFile == nil {
return nil, nil
}
@@ -265,7 +323,22 @@ func (r *sceneResolver) StashIds(ctx context.Context, obj *models.Scene) (ret []
}
func (r *sceneResolver) Phash(ctx context.Context, obj *models.Scene) (*string, error) {
phash := obj.Phash()
f, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
if f == nil {
return nil, nil
}
val := f.Fingerprints.Get(file.FingerprintTypePhash)
if val == nil {
return nil, nil
}
phash, _ := val.(int64)
if phash != 0 {
hexval := utils.PhashToString(phash)
return &hexval, nil
@@ -274,6 +347,12 @@ func (r *sceneResolver) Phash(ctx context.Context, obj *models.Scene) (*string,
}
func (r *sceneResolver) SceneStreams(ctx context.Context, obj *models.Scene) ([]*manager.SceneStreamEndpoint, error) {
// load the primary file into the scene
_, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
config := manager.GetInstance().Config
baseURL, _ := ctx.Value(BaseURLCtxKey).(string)
@@ -283,7 +362,10 @@ func (r *sceneResolver) SceneStreams(ctx context.Context, obj *models.Scene) ([]
}
func (r *sceneResolver) Interactive(ctx context.Context, obj *models.Scene) (bool, error) {
primaryFile := obj.PrimaryFile()
primaryFile, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return false, err
}
if primaryFile == nil {
return false, nil
}
@@ -292,7 +374,10 @@ func (r *sceneResolver) Interactive(ctx context.Context, obj *models.Scene) (boo
}
func (r *sceneResolver) InteractiveSpeed(ctx context.Context, obj *models.Scene) (*int, error) {
primaryFile := obj.PrimaryFile()
primaryFile, err := r.getPrimaryFile(ctx, obj)
if err != nil {
return nil, err
}
if primaryFile == nil {
return nil, nil
}

View File

@@ -29,7 +29,7 @@ func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input ConfigGen
c := config.GetInstance()
existingPaths := c.GetStashPaths()
if len(input.Stashes) > 0 {
if input.Stashes != nil {
for _, s := range input.Stashes {
// Only validate existence of new paths
isNew := true

View File

@@ -338,6 +338,10 @@ func (r *mutationResolver) GalleryDestroy(ctx context.Context, input models.Gall
return fmt.Errorf("gallery with id %d not found", id)
}
if err := gallery.LoadFiles(ctx, qb); err != nil {
return err
}
galleries = append(galleries, gallery)
imgsDestroyed, err = r.galleryService.Destroy(ctx, gallery, fileDeleter, deleteGenerated, deleteFile)
@@ -357,7 +361,7 @@ func (r *mutationResolver) GalleryDestroy(ctx context.Context, input models.Gall
for _, gallery := range galleries {
// don't delete stash library paths
path := gallery.Path()
path := gallery.Path
if deleteFile && path != "" && !isStashPath(path) {
// try to remove the folder - it is possible that it is not empty
// so swallow the error if present
@@ -370,15 +374,15 @@ func (r *mutationResolver) GalleryDestroy(ctx context.Context, input models.Gall
r.hookExecutor.ExecutePostHooks(ctx, gallery.ID, plugin.GalleryDestroyPost, plugin.GalleryDestroyInput{
GalleryDestroyInput: input,
Checksum: gallery.Checksum(),
Path: gallery.Path(),
Path: gallery.Path,
}, nil)
}
// call image destroy post hook as well
for _, img := range imgsDestroyed {
r.hookExecutor.ExecutePostHooks(ctx, img.ID, plugin.ImageDestroyPost, plugin.ImageDestroyInput{
Checksum: img.Checksum(),
Path: img.Path(),
Checksum: img.Checksum,
Path: img.Path,
}, nil)
}

View File

@@ -240,8 +240,8 @@ func (r *mutationResolver) ImageDestroy(ctx context.Context, input models.ImageD
// call post hook after performing the other actions
r.hookExecutor.ExecutePostHooks(ctx, i.ID, plugin.ImageDestroyPost, plugin.ImageDestroyInput{
ImageDestroyInput: input,
Checksum: i.Checksum(),
Path: i.Path(),
Checksum: i.Checksum,
Path: i.Path,
}, nil)
return true, nil
@@ -291,8 +291,8 @@ func (r *mutationResolver) ImagesDestroy(ctx context.Context, input models.Image
// call post hook after performing the other actions
r.hookExecutor.ExecutePostHooks(ctx, image.ID, plugin.ImageDestroyPost, plugin.ImagesDestroyInput{
ImagesDestroyInput: input,
Checksum: image.Checksum(),
Path: image.Path(),
Checksum: image.Checksum,
Path: image.Path,
}, nil)
}

View File

@@ -366,9 +366,9 @@ func (r *mutationResolver) SceneDestroy(ctx context.Context, input models.SceneD
// call post hook after performing the other actions
r.hookExecutor.ExecutePostHooks(ctx, s.ID, plugin.SceneDestroyPost, plugin.SceneDestroyInput{
SceneDestroyInput: input,
Checksum: s.Checksum(),
OSHash: s.OSHash(),
Path: s.Path(),
Checksum: s.Checksum,
OSHash: s.OSHash,
Path: s.Path,
}, nil)
return true, nil
@@ -422,9 +422,9 @@ func (r *mutationResolver) ScenesDestroy(ctx context.Context, input models.Scene
// call post hook after performing the other actions
r.hookExecutor.ExecutePostHooks(ctx, scene.ID, plugin.SceneDestroyPost, plugin.ScenesDestroyInput{
ScenesDestroyInput: input,
Checksum: scene.Checksum(),
OSHash: scene.OSHash(),
Path: scene.Path(),
Checksum: scene.Checksum,
OSHash: scene.OSHash,
Path: scene.Path,
}, nil)
}

View File

@@ -86,7 +86,11 @@ func (r *queryResolver) FindScenes(ctx context.Context, sceneFilter *models.Scen
if err == nil {
result.Count = len(scenes)
for _, s := range scenes {
f := s.PrimaryFile()
if err = s.LoadPrimaryFile(ctx, r.repository.File); err != nil {
break
}
f := s.Files.Primary()
if f == nil {
continue
}

View File

@@ -18,6 +18,11 @@ func (r *queryResolver) SceneStreams(ctx context.Context, id *string) ([]*manage
idInt, _ := strconv.Atoi(*id)
var err error
scene, err = r.repository.Scene.Find(ctx, idInt)
if scene != nil {
err = scene.LoadPrimaryFile(ctx, r.repository.File)
}
return err
}); err != nil {
return nil, err

View File

@@ -25,6 +25,7 @@ type ImageFinder interface {
type imageRoutes struct {
txnManager txn.Manager
imageFinder ImageFinder
fileFinder file.Finder
}
func (rs imageRoutes) Routes() chi.Router {
@@ -44,7 +45,7 @@ func (rs imageRoutes) Routes() chi.Router {
func (rs imageRoutes) Thumbnail(w http.ResponseWriter, r *http.Request) {
img := r.Context().Value(imageKey).(*models.Image)
filepath := manager.GetInstance().Paths.Generated.GetThumbnailPath(img.Checksum(), models.DefaultGthumbWidth)
filepath := manager.GetInstance().Paths.Generated.GetThumbnailPath(img.Checksum, models.DefaultGthumbWidth)
w.Header().Add("Cache-Control", "max-age=604800000")
@@ -54,7 +55,7 @@ func (rs imageRoutes) Thumbnail(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, filepath)
} else {
// don't return anything if there is no file
f := img.PrimaryFile()
f := img.Files.Primary()
if f == nil {
// TODO - probably want to return a placeholder
http.Error(w, http.StatusText(404), 404)
@@ -81,7 +82,7 @@ func (rs imageRoutes) Thumbnail(w http.ResponseWriter, r *http.Request) {
// write the generated thumbnail to disk if enabled
if manager.GetInstance().Config.IsWriteImageThumbnails() {
logger.Debugf("writing thumbnail to disk: %s", img.Path())
logger.Debugf("writing thumbnail to disk: %s", img.Path)
if err := fsutil.WriteFile(filepath, data); err != nil {
logger.Errorf("error writing thumbnail for image %s: %s", img.Path, err)
}
@@ -97,12 +98,12 @@ func (rs imageRoutes) Image(w http.ResponseWriter, r *http.Request) {
// if image is in a zip file, we need to serve it specifically
if len(i.Files) == 0 {
if i.Files.Primary() == nil {
http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
return
}
i.Files[0].Serve(&file.OsFS{}, w, r)
i.Files.Primary().Serve(&file.OsFS{}, w, r)
}
// endregion
@@ -124,6 +125,10 @@ func (rs imageRoutes) ImageCtx(next http.Handler) http.Handler {
image, _ = qb.Find(ctx, imageID)
}
if image != nil {
_ = image.LoadPrimaryFile(ctx, rs.fileFinder)
}
return nil
})
if readTxnErr != nil {

View File

@@ -41,6 +41,7 @@ type CaptionFinder interface {
type sceneRoutes struct {
txnManager txn.Manager
sceneFinder SceneFinder
fileFinder file.Finder
captionFinder CaptionFinder
sceneMarkerFinder SceneMarkerFinder
tagFinder scene.MarkerTagFinder
@@ -94,7 +95,12 @@ func (rs sceneRoutes) StreamMKV(w http.ResponseWriter, r *http.Request) {
// only allow mkv streaming if the scene container is an mkv already
scene := r.Context().Value(sceneKey).(*models.Scene)
container, err := manager.GetSceneFileContainer(scene)
pf := scene.Files.Primary()
if pf == nil {
return
}
container, err := manager.GetVideoFileContainer(pf)
if err != nil {
logger.Errorf("[transcode] error getting container: %v", err)
}
@@ -121,10 +127,8 @@ func (rs sceneRoutes) StreamMp4(w http.ResponseWriter, r *http.Request) {
func (rs sceneRoutes) StreamHLS(w http.ResponseWriter, r *http.Request) {
scene := r.Context().Value(sceneKey).(*models.Scene)
ffprobe := manager.GetInstance().FFProbe
videoFile, err := ffprobe.NewVideoFile(scene.Path())
if err != nil {
logger.Errorf("[stream] error reading video file: %v", err)
pf := scene.Files.Primary()
if pf == nil {
return
}
@@ -134,7 +138,7 @@ func (rs sceneRoutes) StreamHLS(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", ffmpeg.MimeHLS)
var str strings.Builder
ffmpeg.WriteHLSPlaylist(videoFile.Duration, r.URL.String(), &str)
ffmpeg.WriteHLSPlaylist(pf.Duration, r.URL.String(), &str)
requestByteRange := createByteRange(r.Header.Get("Range"))
if requestByteRange.RawString != "" {
@@ -157,7 +161,10 @@ func (rs sceneRoutes) StreamTS(w http.ResponseWriter, r *http.Request) {
func (rs sceneRoutes) streamTranscode(w http.ResponseWriter, r *http.Request, streamFormat ffmpeg.StreamFormat) {
scene := r.Context().Value(sceneKey).(*models.Scene)
f := scene.PrimaryFile()
f := scene.Files.Primary()
if f == nil {
return
}
logger.Debugf("Streaming as %s", streamFormat.MimeType)
// start stream based on query param, if provided
@@ -306,7 +313,7 @@ func (rs sceneRoutes) ChapterVtt(w http.ResponseWriter, r *http.Request) {
func (rs sceneRoutes) Funscript(w http.ResponseWriter, r *http.Request) {
s := r.Context().Value(sceneKey).(*models.Scene)
funscript := video.GetFunscriptPath(s.Path())
funscript := video.GetFunscriptPath(s.Path)
serveFileNoCache(w, r, funscript)
}
@@ -322,7 +329,7 @@ func (rs sceneRoutes) Caption(w http.ResponseWriter, r *http.Request, lang strin
if err := txn.WithTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
var err error
primaryFile := s.PrimaryFile()
primaryFile := s.Files.Primary()
if primaryFile == nil {
return nil
}
@@ -330,7 +337,7 @@ func (rs sceneRoutes) Caption(w http.ResponseWriter, r *http.Request, lang strin
captions, err := rs.captionFinder.GetCaptions(ctx, primaryFile.Base().ID)
for _, caption := range captions {
if lang == caption.LanguageCode && ext == caption.CaptionType {
sub, err := video.ReadSubs(caption.Path(s.Path()))
sub, err := video.ReadSubs(caption.Path(s.Path))
if err == nil {
var b bytes.Buffer
err = sub.WriteToWebVTT(&b)
@@ -492,6 +499,10 @@ func (rs sceneRoutes) SceneCtx(next http.Handler) http.Handler {
scene, _ = qb.Find(ctx, sceneID)
}
if scene != nil {
_ = scene.LoadPrimaryFile(ctx, rs.fileFinder)
}
return nil
})
if readTxnErr != nil {

View File

@@ -140,6 +140,7 @@ func Start() error {
r.Mount("/scene", sceneRoutes{
txnManager: txnManager,
sceneFinder: txnManager.Scene,
fileFinder: txnManager.File,
captionFinder: txnManager.File,
sceneMarkerFinder: txnManager.SceneMarker,
tagFinder: txnManager.Tag,
@@ -147,6 +148,7 @@ func Start() error {
r.Mount("/image", imageRoutes{
txnManager: txnManager,
imageFinder: txnManager.Image,
fileFinder: txnManager.File,
}.Routes())
r.Mount("/studio", studioRoutes{
txnManager: txnManager,