Mirror of https://github.com/stashapp/stash.git, synced 2025-12-18 04:44:37 +03:00
Performer custom fields (#5487)
* Backend changes
* Show custom field values
* Add custom fields table input
* Add custom field filtering
* Add unit tests
* Include custom fields in import/export
* Anonymise performer custom fields
* Move json.Number handler functions to api
* Handle json.Number conversion in api (see the sketch after this list)
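The json.Number handling called out above matters because custom field values arrive from the GraphQL/JSON layer as untyped numbers. A minimal sketch of the kind of conversion involved, assuming values are normalised to int64 or float64 before they are stored or filtered on; the helper name is hypothetical and not part of this commit:

    import "encoding/json"

    // convertCustomFieldValue is a hypothetical helper: it normalises a
    // json.Number custom field value to a concrete Go number and passes every
    // other value type through unchanged.
    func convertCustomFieldValue(v interface{}) (interface{}, error) {
        n, ok := v.(json.Number)
        if !ok {
            return v, nil
        }
        if i, err := n.Int64(); err == nil {
            return i, nil
        }
        f, err := n.Float64() // non-integral numbers become float64
        return f, err
    }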
internal/api/loaders/customfieldsloader_gen.go (normal file, 221 lines added)
@@ -0,0 +1,221 @@
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.

package loaders

import (
    "sync"
    "time"

    "github.com/stashapp/stash/pkg/models"
)

// CustomFieldsLoaderConfig captures the config to create a new CustomFieldsLoader
type CustomFieldsLoaderConfig struct {
    // Fetch is a method that provides the data for the loader
    Fetch func(keys []int) ([]models.CustomFieldMap, []error)

    // Wait is how long to wait before sending a batch
    Wait time.Duration

    // MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
    MaxBatch int
}

// NewCustomFieldsLoader creates a new CustomFieldsLoader given a fetch, wait, and maxBatch
func NewCustomFieldsLoader(config CustomFieldsLoaderConfig) *CustomFieldsLoader {
    return &CustomFieldsLoader{
        fetch:    config.Fetch,
        wait:     config.Wait,
        maxBatch: config.MaxBatch,
    }
}
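
// Illustrative sketch, not part of the generated file: constructing a loader by
// hand with NewCustomFieldsLoader. The fetch function here is a stand-in (the
// real one is wired up by the loader middleware, shown further below); it must
// return one CustomFieldMap per key, in key order, and its error slice may be
// nil, a single shared error, or one error per key. The function name below is
// hypothetical.
func exampleNewCustomFieldsLoader() *CustomFieldsLoader {
    return NewCustomFieldsLoader(CustomFieldsLoaderConfig{
        Fetch: func(keys []int) ([]models.CustomFieldMap, []error) {
            // zero-value custom field maps, one per key
            return make([]models.CustomFieldMap, len(keys)), nil
        },
        Wait:     2 * time.Millisecond,
        MaxBatch: 100,
    })
}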

// CustomFieldsLoader batches and caches requests
type CustomFieldsLoader struct {
    // this method provides the data for the loader
    fetch func(keys []int) ([]models.CustomFieldMap, []error)

    // how long to wait before sending a batch
    wait time.Duration

    // this will limit the maximum number of keys to send in one batch, 0 = no limit
    maxBatch int

    // INTERNAL

    // lazily created cache
    cache map[int]models.CustomFieldMap

    // the current batch. keys will continue to be collected until timeout is hit,
    // then everything will be sent to the fetch method and out to the listeners
    batch *customFieldsLoaderBatch

    // mutex to prevent races
    mu sync.Mutex
}

type customFieldsLoaderBatch struct {
    keys    []int
    data    []models.CustomFieldMap
    error   []error
    closing bool
    done    chan struct{}
}

// Load a CustomFieldMap by key, batching and caching will be applied automatically
func (l *CustomFieldsLoader) Load(key int) (models.CustomFieldMap, error) {
    return l.LoadThunk(key)()
}

// LoadThunk returns a function that when called will block waiting for a CustomFieldMap.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *CustomFieldsLoader) LoadThunk(key int) func() (models.CustomFieldMap, error) {
    l.mu.Lock()
    if it, ok := l.cache[key]; ok {
        l.mu.Unlock()
        return func() (models.CustomFieldMap, error) {
            return it, nil
        }
    }
    if l.batch == nil {
        l.batch = &customFieldsLoaderBatch{done: make(chan struct{})}
    }
    batch := l.batch
    pos := batch.keyIndex(l, key)
    l.mu.Unlock()

    return func() (models.CustomFieldMap, error) {
        <-batch.done

        var data models.CustomFieldMap
        if pos < len(batch.data) {
            data = batch.data[pos]
        }

        var err error
        // it's convenient to be able to return a single error for everything
        if len(batch.error) == 1 {
            err = batch.error[0]
        } else if batch.error != nil {
            err = batch.error[pos]
        }

        if err == nil {
            l.mu.Lock()
            l.unsafeSet(key, data)
            l.mu.Unlock()
        }

        return data, err
    }
}
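
// Illustrative sketch, not part of the generated file: LoadThunk queues a key
// and defers blocking until the returned thunk is called, so one goroutine can
// queue lookups against several loaders and let them share their batch windows.
// Load(key) is simply LoadThunk(key)(). The helper below is hypothetical;
// PerformerLoader is assumed to be the dataloaden-generated loader behind
// Loaders.PerformerByID in this package.
func examplePerformerWithCustomFields(performers *PerformerLoader, customFields *CustomFieldsLoader, id int) (performer *models.Performer, fields models.CustomFieldMap, err error) {
    // queue both lookups before blocking on either result
    performerThunk := performers.LoadThunk(id)
    fieldsThunk := customFields.LoadThunk(id)

    if performer, err = performerThunk(); err != nil {
        return
    }
    fields, err = fieldsThunk()
    return
}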

// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *CustomFieldsLoader) LoadAll(keys []int) ([]models.CustomFieldMap, []error) {
    results := make([]func() (models.CustomFieldMap, error), len(keys))

    for i, key := range keys {
        results[i] = l.LoadThunk(key)
    }

    customFieldMaps := make([]models.CustomFieldMap, len(keys))
    errors := make([]error, len(keys))
    for i, thunk := range results {
        customFieldMaps[i], errors[i] = thunk()
    }
    return customFieldMaps, errors
}

// LoadAllThunk returns a function that when called will block waiting for CustomFieldMaps.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *CustomFieldsLoader) LoadAllThunk(keys []int) func() ([]models.CustomFieldMap, []error) {
    results := make([]func() (models.CustomFieldMap, error), len(keys))
    for i, key := range keys {
        results[i] = l.LoadThunk(key)
    }
    return func() ([]models.CustomFieldMap, []error) {
        customFieldMaps := make([]models.CustomFieldMap, len(keys))
        errors := make([]error, len(keys))
        for i, thunk := range results {
            customFieldMaps[i], errors[i] = thunk()
        }
        return customFieldMaps, errors
    }
}
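
// Illustrative sketch, not part of the generated file: LoadAll is the batch
// convenience wrapper over the thunk pattern shown above, and LoadAllThunk
// defers the blocking step. Errors come back in a slice aligned with the input
// keys. The helper below is hypothetical.
func exampleLoadAll(l *CustomFieldsLoader, performerIDs []int) ([]models.CustomFieldMap, error) {
    fieldMaps, errs := l.LoadAll(performerIDs)
    for _, err := range errs {
        if err != nil {
            // fail on the first per-key error
            return nil, err
        }
    }
    return fieldMaps, nil
}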

// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *CustomFieldsLoader) Prime(key int, value models.CustomFieldMap) bool {
    l.mu.Lock()
    var found bool
    if _, found = l.cache[key]; !found {
        l.unsafeSet(key, value)
    }
    l.mu.Unlock()
    return !found
}

// Clear the value at key from the cache, if it exists
func (l *CustomFieldsLoader) Clear(key int) {
    l.mu.Lock()
    delete(l.cache, key)
    l.mu.Unlock()
}
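
// Illustrative sketch, not part of the generated file: Prime only inserts when
// the key is not already cached, so replacing a cached value (for example after
// a mutation changes a performer's custom fields) requires Clear first. The
// helper below is hypothetical.
func examplePrimeAfterUpdate(l *CustomFieldsLoader, performerID int, updated models.CustomFieldMap) {
    l.Clear(performerID)          // drop any stale cached value
    l.Prime(performerID, updated) // now guaranteed to store the new value
}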

func (l *CustomFieldsLoader) unsafeSet(key int, value models.CustomFieldMap) {
    if l.cache == nil {
        l.cache = map[int]models.CustomFieldMap{}
    }
    l.cache[key] = value
}

// keyIndex will return the location of the key in the batch, if it's not found
// it will add the key to the batch
func (b *customFieldsLoaderBatch) keyIndex(l *CustomFieldsLoader, key int) int {
    for i, existingKey := range b.keys {
        if key == existingKey {
            return i
        }
    }

    pos := len(b.keys)
    b.keys = append(b.keys, key)
    if pos == 0 {
        go b.startTimer(l)
    }

    if l.maxBatch != 0 && pos >= l.maxBatch-1 {
        if !b.closing {
            b.closing = true
            l.batch = nil
            go b.end(l)
        }
    }

    return pos
}

func (b *customFieldsLoaderBatch) startTimer(l *CustomFieldsLoader) {
    time.Sleep(l.wait)
    l.mu.Lock()

    // we must have hit a batch limit and are already finalizing this batch
    if b.closing {
        l.mu.Unlock()
        return
    }

    l.batch = nil
    l.mu.Unlock()

    b.end(l)
}

func (b *customFieldsLoaderBatch) end(l *CustomFieldsLoader) {
    b.data, b.error = l.fetch(b.keys)
    close(b.done)
}
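The batching internals above work together as follows: the first key added to a batch starts the wait timer, reaching maxBatch finalizes the batch early, and in either case fetch is called once with every collected key before done is closed to release the waiting thunks. A minimal hypothetical sketch of that behaviour, written as if it lived alongside this package:

    // with MaxBatch of 2, loading three keys flushes two batches: keys 1 and 2
    // flush when the batch fills, key 3 flushes after Wait elapses
    func exampleBatchLifecycle() int {
        var mu sync.Mutex
        calls := 0
        l := NewCustomFieldsLoader(CustomFieldsLoaderConfig{
            Fetch: func(keys []int) ([]models.CustomFieldMap, []error) {
                mu.Lock()
                calls++ // one invocation per flushed batch
                mu.Unlock()
                return make([]models.CustomFieldMap, len(keys)), nil
            },
            Wait:     time.Millisecond,
            MaxBatch: 2,
        })
        l.LoadAll([]int{1, 2, 3})
        return calls // 2
    }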
@@ -13,6 +13,7 @@
//go:generate go run github.com/vektah/dataloaden SceneFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden ImageFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden GalleryFileIDsLoader int []github.com/stashapp/stash/pkg/models.FileID
//go:generate go run github.com/vektah/dataloaden CustomFieldsLoader int github.com/stashapp/stash/pkg/models.CustomFieldMap
//go:generate go run github.com/vektah/dataloaden SceneOCountLoader int int
//go:generate go run github.com/vektah/dataloaden ScenePlayCountLoader int int
//go:generate go run github.com/vektah/dataloaden SceneOHistoryLoader int []time.Time
@@ -51,13 +52,16 @@ type Loaders struct {
    ImageFiles   *ImageFileIDsLoader
    GalleryFiles *GalleryFileIDsLoader

    GalleryByID   *GalleryLoader
    ImageByID     *ImageLoader
    PerformerByID *PerformerLoader
    StudioByID    *StudioLoader
    TagByID       *TagLoader
    GroupByID     *GroupLoader
    FileByID      *FileLoader
    GalleryByID *GalleryLoader
    ImageByID   *ImageLoader

    PerformerByID         *PerformerLoader
    PerformerCustomFields *CustomFieldsLoader

    StudioByID *StudioLoader
    TagByID    *TagLoader
    GroupByID  *GroupLoader
    FileByID   *FileLoader
}

type Middleware struct {
@@ -88,6 +92,11 @@ func (m Middleware) Middleware(next http.Handler) http.Handler {
            maxBatch: maxBatch,
            fetch:    m.fetchPerformers(ctx),
        },
        PerformerCustomFields: &CustomFieldsLoader{
            wait:     wait,
            maxBatch: maxBatch,
            fetch:    m.fetchPerformerCustomFields(ctx),
        },
        StudioByID: &StudioLoader{
            wait:     wait,
            maxBatch: maxBatch,
@@ -214,6 +223,18 @@ func (m Middleware) fetchPerformers(ctx context.Context) func(keys []int) ([]*mo
    }
}

func (m Middleware) fetchPerformerCustomFields(ctx context.Context) func(keys []int) ([]models.CustomFieldMap, []error) {
    return func(keys []int) (ret []models.CustomFieldMap, errs []error) {
        err := m.Repository.WithDB(ctx, func(ctx context.Context) error {
            var err error
            ret, err = m.Repository.Performer.GetCustomFieldsBulk(ctx, keys)
            return err
        })

        return ret, toErrorSlice(err)
    }
}
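
// Illustrative sketch, not part of the diff: with the fetch function above wired
// into the request-scoped Loaders value, a resolver can load one performer's
// custom fields at a time and still produce a single GetCustomFieldsBulk call
// per flushed batch. The helper below is hypothetical.
func resolvePerformerCustomFields(ldrs *Loaders, performerID int) (models.CustomFieldMap, error) {
    return ldrs.PerformerCustomFields.Load(performerID)
}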

func (m Middleware) fetchStudios(ctx context.Context) func(keys []int) ([]*models.Studio, []error) {
    return func(keys []int) (ret []*models.Studio, errs []error) {
        err := m.Repository.WithDB(ctx, func(ctx context.Context) error {