Model refactor (#3915)
* Add mockery config file
* Move basic file/folder structs to models
* Fix hack due to import loop
* Move file interfaces to models
* Move folder interfaces to models
* Move scene interfaces to models
* Move scene marker interfaces to models
* Move image interfaces to models
* Move gallery interfaces to models
* Move gallery chapter interfaces to models
* Move studio interfaces to models
* Move movie interfaces to models
* Move performer interfaces to models
* Move tag interfaces to models
* Move autotag interfaces to models
* Regenerate mocks
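For consuming code, the practical effect of the move is an import and identifier swap: packages that previously imported github.com/stashapp/stash/pkg/file just for the file.ID type now take the equivalent models.FileID from github.com/stashapp/stash/pkg/models, which presumably removes the import loop mentioned above. A minimal illustrative sketch, not taken from the stash codebase (the helper function is made up; only the type and import spelling come from the diff below):

// Hypothetical consumer code; only the ID type's package changes.
package example

import (
	"fmt"

	"github.com/stashapp/stash/pkg/models" // previously: "github.com/stashapp/stash/pkg/file"
)

// describeGalleryFiles is a made-up helper. Before the refactor its
// parameter would have been []file.ID; afterwards it is []models.FileID.
func describeGalleryFiles(galleryID int, fileIDs []models.FileID) {
	fmt.Printf("gallery %d has %d associated files\n", galleryID, len(fileIDs))
}

The diff below, from the generated GalleryFileIDsLoader, shows the same substitution applied mechanically throughout.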
@@ -6,13 +6,13 @@ import (
 	"sync"
 	"time"
 
-	"github.com/stashapp/stash/pkg/file"
+	"github.com/stashapp/stash/pkg/models"
 )
 
 // GalleryFileIDsLoaderConfig captures the config to create a new GalleryFileIDsLoader
 type GalleryFileIDsLoaderConfig struct {
 	// Fetch is a method that provides the data for the loader
-	Fetch func(keys []int) ([][]file.ID, []error)
+	Fetch func(keys []int) ([][]models.FileID, []error)
 
 	// Wait is how long wait before sending a batch
 	Wait time.Duration
@@ -33,7 +33,7 @@ func NewGalleryFileIDsLoader(config GalleryFileIDsLoaderConfig) *GalleryFileIDsL
 // GalleryFileIDsLoader batches and caches requests
 type GalleryFileIDsLoader struct {
 	// this method provides the data for the loader
-	fetch func(keys []int) ([][]file.ID, []error)
+	fetch func(keys []int) ([][]models.FileID, []error)
 
 	// how long to done before sending a batch
 	wait time.Duration
@@ -44,7 +44,7 @@ type GalleryFileIDsLoader struct {
 	// INTERNAL
 
 	// lazily created cache
-	cache map[int][]file.ID
+	cache map[int][]models.FileID
 
 	// the current batch. keys will continue to be collected until timeout is hit,
 	// then everything will be sent to the fetch method and out to the listeners
@@ -56,25 +56,25 @@ type GalleryFileIDsLoader struct {
 
 type galleryFileIDsLoaderBatch struct {
 	keys    []int
-	data    [][]file.ID
+	data    [][]models.FileID
 	error   []error
 	closing bool
 	done    chan struct{}
 }
 
-// Load a ID by key, batching and caching will be applied automatically
-func (l *GalleryFileIDsLoader) Load(key int) ([]file.ID, error) {
+// Load a FileID by key, batching and caching will be applied automatically
+func (l *GalleryFileIDsLoader) Load(key int) ([]models.FileID, error) {
 	return l.LoadThunk(key)()
 }
 
-// LoadThunk returns a function that when called will block waiting for a ID.
+// LoadThunk returns a function that when called will block waiting for a FileID.
 // This method should be used if you want one goroutine to make requests to many
 // different data loaders without blocking until the thunk is called.
-func (l *GalleryFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) {
+func (l *GalleryFileIDsLoader) LoadThunk(key int) func() ([]models.FileID, error) {
 	l.mu.Lock()
 	if it, ok := l.cache[key]; ok {
 		l.mu.Unlock()
-		return func() ([]file.ID, error) {
+		return func() ([]models.FileID, error) {
 			return it, nil
 		}
 	}
@@ -85,10 +85,10 @@ func (l *GalleryFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) {
 	pos := batch.keyIndex(l, key)
 	l.mu.Unlock()
 
-	return func() ([]file.ID, error) {
+	return func() ([]models.FileID, error) {
 		<-batch.done
 
-		var data []file.ID
+		var data []models.FileID
 		if pos < len(batch.data) {
 			data = batch.data[pos]
 		}
@@ -113,49 +113,49 @@ func (l *GalleryFileIDsLoader) LoadThunk(key int) func() ([]file.ID, error) {
 
 // LoadAll fetches many keys at once. It will be broken into appropriate sized
 // sub batches depending on how the loader is configured
-func (l *GalleryFileIDsLoader) LoadAll(keys []int) ([][]file.ID, []error) {
-	results := make([]func() ([]file.ID, error), len(keys))
+func (l *GalleryFileIDsLoader) LoadAll(keys []int) ([][]models.FileID, []error) {
+	results := make([]func() ([]models.FileID, error), len(keys))
 
 	for i, key := range keys {
 		results[i] = l.LoadThunk(key)
 	}
 
-	iDs := make([][]file.ID, len(keys))
+	fileIDs := make([][]models.FileID, len(keys))
 	errors := make([]error, len(keys))
 	for i, thunk := range results {
-		iDs[i], errors[i] = thunk()
+		fileIDs[i], errors[i] = thunk()
 	}
-	return iDs, errors
+	return fileIDs, errors
 }
 
-// LoadAllThunk returns a function that when called will block waiting for a IDs.
+// LoadAllThunk returns a function that when called will block waiting for a FileIDs.
 // This method should be used if you want one goroutine to make requests to many
 // different data loaders without blocking until the thunk is called.
-func (l *GalleryFileIDsLoader) LoadAllThunk(keys []int) func() ([][]file.ID, []error) {
-	results := make([]func() ([]file.ID, error), len(keys))
+func (l *GalleryFileIDsLoader) LoadAllThunk(keys []int) func() ([][]models.FileID, []error) {
+	results := make([]func() ([]models.FileID, error), len(keys))
 	for i, key := range keys {
 		results[i] = l.LoadThunk(key)
 	}
-	return func() ([][]file.ID, []error) {
-		iDs := make([][]file.ID, len(keys))
+	return func() ([][]models.FileID, []error) {
+		fileIDs := make([][]models.FileID, len(keys))
 		errors := make([]error, len(keys))
 		for i, thunk := range results {
-			iDs[i], errors[i] = thunk()
+			fileIDs[i], errors[i] = thunk()
 		}
-		return iDs, errors
+		return fileIDs, errors
 	}
 }
 
 // Prime the cache with the provided key and value. If the key already exists, no change is made
 // and false is returned.
 // (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
-func (l *GalleryFileIDsLoader) Prime(key int, value []file.ID) bool {
+func (l *GalleryFileIDsLoader) Prime(key int, value []models.FileID) bool {
 	l.mu.Lock()
 	var found bool
 	if _, found = l.cache[key]; !found {
 		// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
 		// and end up with the whole cache pointing to the same value.
-		cpy := make([]file.ID, len(value))
+		cpy := make([]models.FileID, len(value))
 		copy(cpy, value)
 		l.unsafeSet(key, cpy)
 	}
@@ -170,9 +170,9 @@ func (l *GalleryFileIDsLoader) Clear(key int) {
 	l.mu.Unlock()
 }
 
-func (l *GalleryFileIDsLoader) unsafeSet(key int, value []file.ID) {
+func (l *GalleryFileIDsLoader) unsafeSet(key int, value []models.FileID) {
 	if l.cache == nil {
-		l.cache = map[int][]file.ID{}
+		l.cache = map[int][]models.FileID{}
 	}
 	l.cache[key] = value
 }
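For context, a minimal usage sketch of the loader touched by this diff. It is not taken from the stash codebase: the import path for the generated loaders, the loadGalleryFiles function, and the fake Fetch implementation are assumptions for illustration; the constructor, config fields, and Load signature come from the diff above.

package example

import (
	"fmt"
	"time"

	"github.com/stashapp/stash/internal/api/loaders" // assumed import path for the generated loaders
	"github.com/stashapp/stash/pkg/models"
)

func loadGalleryFiles() {
	loader := loaders.NewGalleryFileIDsLoader(loaders.GalleryFileIDsLoaderConfig{
		// Collect keys for 2ms before firing one batched Fetch call.
		Wait: 2 * time.Millisecond,
		// Fetch receives every gallery ID requested during the Wait window.
		// In the real application this would be one batched database query;
		// this stand-in just returns an empty result per key.
		Fetch: func(keys []int) ([][]models.FileID, []error) {
			results := make([][]models.FileID, len(keys))
			errs := make([]error, len(keys))
			return results, errs
		},
	})

	// Load blocks until the batch completes; concurrent Load calls made
	// within the Wait window share the same Fetch invocation, and results
	// are cached per key.
	ids, err := loader.Load(1)
	fmt.Println(ids, err)
}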