Add filesystem based blob storage (#3187)
* Refactor transaction hooks. Add preCommit
* Add BlobStore
* Use blobStore for tag images
* Use blobStore for studio images
* Use blobStore for performer images
* Use blobStore for scene covers
* Don't generate screenshots in legacy directory
* Run post-hooks outside original transaction
* Use blobStore for movie images
* Remove unnecessary DestroyImage methods
* Add missing filter for scene cover
* Add covers to generate options
* Add generate cover option to UI
* Add screenshot migration
* Delete thumb files as part of screenshot migration
@@ -34,6 +34,8 @@ models:
  title:
    resolver: true
  # autobind on config causes generation issues
  BlobsStorageType:
    model: github.com/stashapp/stash/internal/manager/config.BlobsStorageType
  StashConfig:
    model: github.com/stashapp/stash/internal/manager/config.StashConfig
  StashConfigInput:

@@ -10,6 +10,8 @@ fragment ConfigGeneralData on ConfigGeneralResult {
  metadataPath
  scrapersPath
  cachePath
  blobsPath
  blobsStorage
  calculateMD5
  videoFileNamingAlgorithm
  parallelTasks

@@ -131,6 +133,7 @@ fragment ConfigDefaultSettingsData on ConfigDefaultSettingsResult {
  scan {
    useFileMetadata
    stripFileExtension
    scanGenerateCovers
    scanGeneratePreviews
    scanGenerateImagePreviews
    scanGenerateSprites

@@ -159,6 +162,7 @@ fragment ConfigDefaultSettingsData on ConfigDefaultSettingsResult {
  }

  generate {
    covers
    sprites
    previews
    imagePreviews
graphql/documents/mutations/migration.graphql (new file, 7 lines)
@@ -0,0 +1,7 @@
mutation MigrateSceneScreenshots($input: MigrateSceneScreenshotsInput!) {
  migrateSceneScreenshots(input: $input)
}

mutation MigrateBlobs($input: MigrateBlobsInput!) {
  migrateBlobs(input: $input)
}
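Both migrations are fire-and-forget jobs: each mutation returns a job ID for the job queue. As a minimal sketch, here is how a Go client might invoke MigrateBlobs over the HTTP GraphQL endpoint — the localhost:9999 address and /graphql path are assumptions about a default instance, not part of this commit:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Request body wrapping the MigrateBlobs document defined above.
    body, _ := json.Marshal(map[string]interface{}{
        "query": "mutation MigrateBlobs($input: MigrateBlobsInput!) { migrateBlobs(input: $input) }",
        "variables": map[string]interface{}{
            "input": map[string]interface{}{"deleteOld": true},
        },
    })

    // Endpoint is an assumption; adjust to your instance.
    resp, err := http.Post("http://localhost:9999/graphql", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    var out struct {
        Data struct {
            MigrateBlobs string `json:"migrateBlobs"` // job ID as a string
        } `json:"data"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        panic(err)
    }
    fmt.Println("started job", out.Data.MigrateBlobs)
}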
@@ -287,8 +287,13 @@ type Mutation {
  metadataClean(input: CleanMetadataInput!): ID!
  """Identifies scenes using scrapers. Returns the job ID"""
  metadataIdentify(input: IdentifyMetadataInput!): ID!

  """Migrate generated files for the current hash naming"""
  migrateHashNaming: ID!
  """Migrates legacy scene screenshot files into the blob storage"""
  migrateSceneScreenshots(input: MigrateSceneScreenshotsInput!): ID!
  """Migrates blobs from the old storage system to the current one"""
  migrateBlobs(input: MigrateBlobsInput!): ID!

  """Anonymise the database in a separate file. Optionally returns a link to download the database file"""
  anonymiseDatabase(input: AnonymiseDatabaseInput!): String

@@ -8,6 +8,8 @@ input SetupInput {
  generatedLocation: String!
  """Empty to indicate default"""
  cacheLocation: String!
  """Empty to indicate database storage for blobs"""
  blobsLocation: String!
}

enum StreamingResolutionEnum {

@@ -34,6 +36,13 @@ enum HashAlgorithm {
  "oshash", OSHASH
}

enum BlobsStorageType {
  # blobs are stored in the database
  "Database", DATABASE
  # blobs are stored in the filesystem under the configured blobs directory
  "Filesystem", FILESYSTEM
}

input ConfigGeneralInput {
  """Array of file paths to content"""
  stashes: [StashConfigInput!]

@@ -49,6 +58,10 @@ input ConfigGeneralInput {
  scrapersPath: String
  """Path to cache"""
  cachePath: String
  """Path to blobs - required for filesystem blob storage"""
  blobsPath: String
  """Where to store blobs"""
  blobsStorage: BlobsStorageType
  """Whether to calculate MD5 checksums for scene video files"""
  calculateMD5: Boolean
  """Hash algorithm to use for generated file naming"""

@@ -154,6 +167,10 @@ type ConfigGeneralResult {
  scrapersPath: String!
  """Path to cache"""
  cachePath: String!
  """Path to blobs - required for filesystem blob storage"""
  blobsPath: String!
  """Where to store blobs"""
  blobsStorage: BlobsStorageType!
  """Whether to calculate MD5 checksums for scene video files"""
  calculateMD5: Boolean!
  """Hash algorithm to use for generated file naming"""

@@ -1,6 +1,7 @@
scalar Upload

input GenerateMetadataInput {
  covers: Boolean
  sprites: Boolean
  previews: Boolean
  imagePreviews: Boolean

@@ -37,6 +38,7 @@ input GeneratePreviewOptionsInput {
}

type GenerateMetadataOptions {
  covers: Boolean
  sprites: Boolean
  previews: Boolean
  imagePreviews: Boolean

@@ -84,6 +86,8 @@ input ScanMetadataInput {

  """Strip file extension from title"""
  stripFileExtension: Boolean @deprecated(reason: "Not implemented")
  """Generate covers during scan"""
  scanGenerateCovers: Boolean
  """Generate previews during scan"""
  scanGeneratePreviews: Boolean
  """Generate image previews during scan"""

@@ -101,9 +105,11 @@ input ScanMetadataInput {

type ScanMetadataOptions {
  """Set name, date, details from metadata (if present)"""
  useFileMetadata: Boolean!
  useFileMetadata: Boolean! @deprecated(reason: "Not implemented")
  """Strip file extension from title"""
  stripFileExtension: Boolean!
  stripFileExtension: Boolean! @deprecated(reason: "Not implemented")
  """Generate covers during scan"""
  scanGenerateCovers: Boolean!
  """Generate previews during scan"""
  scanGeneratePreviews: Boolean!
  """Generate image previews during scan"""
graphql/schema/types/migration.graphql (new file, 11 lines)
@@ -0,0 +1,11 @@
input MigrateSceneScreenshotsInput {
  # if true, delete screenshot files after migrating
  deleteFiles: Boolean
  # if true, overwrite existing covers with the covers from the screenshots directory
  overwriteExisting: Boolean
}

input MigrateBlobsInput {
  # if true, delete blob data from old storage system
  deleteOld: Boolean
}
@@ -134,6 +134,28 @@ func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input ConfigGen
        refreshStreamManager = true
    }

    refreshBlobStorage := false
    existingBlobsPath := c.GetBlobsPath()
    if input.BlobsPath != nil && existingBlobsPath != *input.BlobsPath {
        if err := validateDir(config.BlobsPath, *input.BlobsPath, true); err != nil {
            return makeConfigGeneralResult(), err
        }

        c.Set(config.BlobsPath, input.BlobsPath)
        refreshBlobStorage = true
    }

    if input.BlobsStorage != nil && *input.BlobsStorage != c.GetBlobsStorage() {
        if *input.BlobsStorage == config.BlobStorageTypeFilesystem && c.GetBlobsPath() == "" {
            return makeConfigGeneralResult(), fmt.Errorf("blobs path must be set when using filesystem storage")
        }

        // TODO - migrate between systems
        c.Set(config.BlobsStorage, input.BlobsStorage)

        refreshBlobStorage = true
    }

    if input.VideoFileNamingAlgorithm != nil && *input.VideoFileNamingAlgorithm != c.GetVideoFileNamingAlgorithm() {
        calculateMD5 := c.IsCalculateMD5()
        if input.CalculateMd5 != nil {

@@ -336,6 +358,9 @@ func (r *mutationResolver) ConfigureGeneral(ctx context.Context, input ConfigGen
    if refreshStreamManager {
        manager.GetInstance().RefreshStreamManager()
    }
    if refreshBlobStorage {
        manager.GetInstance().SetBlobStoreOptions()
    }

    return makeConfigGeneralResult(), nil
}

@@ -530,7 +555,7 @@ func (r *mutationResolver) ConfigureDefaults(ctx context.Context, input ConfigDe
    }

    if input.Scan != nil {
        c.Set(config.DefaultScanSettings, input.Scan.ScanMetadataOptions)
        c.Set(config.DefaultScanSettings, input.Scan)
    }

    if input.AutoTag != nil {
internal/api/resolver_mutation_migrate.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package api

import (
    "context"
    "strconv"

    "github.com/stashapp/stash/internal/manager"
    "github.com/stashapp/stash/internal/manager/task"
    "github.com/stashapp/stash/pkg/scene"
    "github.com/stashapp/stash/pkg/utils"
)

func (r *mutationResolver) MigrateSceneScreenshots(ctx context.Context, input MigrateSceneScreenshotsInput) (string, error) {
    db := manager.GetInstance().Database
    t := &task.MigrateSceneScreenshotsJob{
        ScreenshotsPath: manager.GetInstance().Paths.Generated.Screenshots,
        Input: scene.MigrateSceneScreenshotsInput{
            DeleteFiles:       utils.IsTrue(input.DeleteFiles),
            OverwriteExisting: utils.IsTrue(input.OverwriteExisting),
        },
        SceneRepo:  db.Scene,
        TxnManager: db,
    }
    jobID := manager.GetInstance().JobManager.Add(ctx, "Migrating scene screenshots to blobs...", t)

    return strconv.Itoa(jobID), nil
}

func (r *mutationResolver) MigrateBlobs(ctx context.Context, input MigrateBlobsInput) (string, error) {
    db := manager.GetInstance().Database
    t := &task.MigrateBlobsJob{
        TxnManager: db,
        BlobStore:  db.Blobs,
        Vacuumer:   db,
        DeleteOld:  utils.IsTrue(input.DeleteOld),
    }
    jobID := manager.GetInstance().JobManager.Add(ctx, "Migrating blobs...", t)

    return strconv.Itoa(jobID), nil
}
@@ -111,7 +111,13 @@ func (r *mutationResolver) MovieCreate(ctx context.Context, input MovieCreateInp

        // update image table
        if len(frontimageData) > 0 {
            if err := qb.UpdateImages(ctx, movie.ID, frontimageData, backimageData); err != nil {
            if err := qb.UpdateFrontImage(ctx, movie.ID, frontimageData); err != nil {
                return err
            }
        }

        if len(backimageData) > 0 {
            if err := qb.UpdateBackImage(ctx, movie.ID, backimageData); err != nil {
                return err
            }
        }

@@ -184,35 +190,15 @@ func (r *mutationResolver) MovieUpdate(ctx context.Context, input MovieUpdateInp
    }

    // update image table
    if frontImageIncluded || backImageIncluded {
        if !frontImageIncluded {
            frontimageData, err = qb.GetFrontImage(ctx, updatedMovie.ID)
            if err != nil {
                return err
            }
        }
        if !backImageIncluded {
            backimageData, err = qb.GetBackImage(ctx, updatedMovie.ID)
            if err != nil {
                return err
            }
    if frontImageIncluded {
        if err := qb.UpdateFrontImage(ctx, movie.ID, frontimageData); err != nil {
            return err
        }
    }

    if len(frontimageData) == 0 && len(backimageData) == 0 {
        // both images are being nulled. Destroy them.
        if err := qb.DestroyImages(ctx, movie.ID); err != nil {
            return err
        }
    } else {
        // HACK - if front image is null and back image is not null, then set the front image
        // to the default image since we can't have a null front image and a non-null back image
        if frontimageData == nil && backimageData != nil {
            frontimageData, _ = utils.ProcessImageInput(ctx, models.DefaultMovieImage)
        }

        if err := qb.UpdateImages(ctx, movie.ID, frontimageData, backimageData); err != nil {
            return err
        }
    if backImageIncluded {
        if err := qb.UpdateBackImage(ctx, movie.ID, backimageData); err != nil {
            return err
        }
    }
@@ -9,14 +9,12 @@ import (
    "time"

    "github.com/stashapp/stash/internal/manager"
    "github.com/stashapp/stash/internal/manager/config"
    "github.com/stashapp/stash/pkg/file"
    "github.com/stashapp/stash/pkg/models"
    "github.com/stashapp/stash/pkg/plugin"
    "github.com/stashapp/stash/pkg/scene"
    "github.com/stashapp/stash/pkg/sliceutil/intslice"
    "github.com/stashapp/stash/pkg/sliceutil/stringslice"
    "github.com/stashapp/stash/pkg/txn"
    "github.com/stashapp/stash/pkg/utils"
)

@@ -320,13 +318,6 @@ func (r *mutationResolver) sceneUpdateCoverImage(ctx context.Context, s *models.
    if err := qb.UpdateCover(ctx, s.ID, coverImageData); err != nil {
        return err
    }

    if s.Path != "" {
        // update the file-based screenshot after commit
        txn.AddPostCommitHook(ctx, func(ctx context.Context) error {
            return scene.SetScreenshot(manager.GetInstance().Paths, s.GetHash(config.GetInstance().GetVideoFileNamingAlgorithm()), coverImageData)
        })
    }
    }

    return nil
@@ -62,7 +62,7 @@ func (r *mutationResolver) SubmitStashBoxSceneDraft(ctx context.Context, input S
        return fmt.Errorf("scene with id %d not found", id)
    }

    cover, err := r.sceneService.GetCover(ctx, scene)
    cover, err := qb.GetCover(ctx, id)
    if err != nil {
        return fmt.Errorf("getting scene cover: %w", err)
    }
@@ -176,15 +176,10 @@ func (r *mutationResolver) StudioUpdate(ctx context.Context, input StudioUpdateI
    }

    // update image table
    if len(imageData) > 0 {
    if imageIncluded {
        if err := qb.UpdateImage(ctx, s.ID, imageData); err != nil {
            return err
        }
    } else if imageIncluded {
        // must be unsetting
        if err := qb.DestroyImage(ctx, s.ID); err != nil {
            return err
        }
    }

    // Save the stash_ids

@@ -208,15 +208,10 @@ func (r *mutationResolver) TagUpdate(ctx context.Context, input TagUpdateInput)
    }

    // update image table
    if len(imageData) > 0 {
    if imageIncluded {
        if err := qb.UpdateImage(ctx, tagID, imageData); err != nil {
            return err
        }
    } else if imageIncluded {
        // must be unsetting
        if err := qb.DestroyImage(ctx, tagID); err != nil {
            return err
        }
    }

    if translator.hasField("aliases") {
@@ -91,6 +91,8 @@ func makeConfigGeneralResult() *ConfigGeneralResult {
        ConfigFilePath:           config.GetConfigFile(),
        ScrapersPath:             config.GetScrapersPath(),
        CachePath:                config.GetCachePath(),
        BlobsPath:                config.GetBlobsPath(),
        BlobsStorage:             config.GetBlobsStorage(),
        CalculateMd5:             config.IsCalculateMD5(),
        VideoFileNamingAlgorithm: config.GetVideoFileNamingAlgorithm(),
        ParallelTasks:            config.GetParallelTasks(),
@@ -42,8 +42,9 @@ func (rs movieRoutes) FrontImage(w http.ResponseWriter, r *http.Request) {
    var image []byte
    if defaultParam != "true" {
        readTxnErr := txn.WithReadTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
            image, _ = rs.movieFinder.GetFrontImage(ctx, movie.ID)
            return nil
            var err error
            image, err = rs.movieFinder.GetFrontImage(ctx, movie.ID)
            return err
        })
        if errors.Is(readTxnErr, context.Canceled) {
            return

@@ -68,8 +69,9 @@ func (rs movieRoutes) BackImage(w http.ResponseWriter, r *http.Request) {
    var image []byte
    if defaultParam != "true" {
        readTxnErr := txn.WithReadTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
            image, _ = rs.movieFinder.GetBackImage(ctx, movie.ID)
            return nil
            var err error
            image, err = rs.movieFinder.GetBackImage(ctx, movie.ID)
            return err
        })
        if errors.Is(readTxnErr, context.Canceled) {
            return

@@ -42,8 +42,9 @@ func (rs performerRoutes) Image(w http.ResponseWriter, r *http.Request) {
    var image []byte
    if defaultParam != "true" {
        readTxnErr := txn.WithReadTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
            image, _ = rs.performerFinder.GetImage(ctx, performer.ID)
            return nil
            var err error
            image, err = rs.performerFinder.GetImage(ctx, performer.ID)
            return err
        })
        if errors.Is(readTxnErr, context.Canceled) {
            return

@@ -42,8 +42,9 @@ func (rs studioRoutes) Image(w http.ResponseWriter, r *http.Request) {
    var image []byte
    if defaultParam != "true" {
        readTxnErr := txn.WithReadTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
            image, _ = rs.studioFinder.GetImage(ctx, studio.ID)
            return nil
            var err error
            image, err = rs.studioFinder.GetImage(ctx, studio.ID)
            return err
        })
        if errors.Is(readTxnErr, context.Canceled) {
            return

@@ -42,8 +42,9 @@ func (rs tagRoutes) Image(w http.ResponseWriter, r *http.Request) {
    var image []byte
    if defaultParam != "true" {
        readTxnErr := txn.WithReadTxn(r.Context(), rs.txnManager, func(ctx context.Context) error {
            image, _ = rs.tagFinder.GetImage(ctx, tag.ID)
            return nil
            var err error
            image, err = rs.tagFinder.GetImage(ctx, tag.ID)
            return err
        })
        if errors.Is(readTxnErr, context.Canceled) {
            return
@@ -35,7 +35,6 @@ type SceneIdentifier struct {

    DefaultOptions              *MetadataOptions
    Sources                     []ScraperSource
    ScreenshotSetter            scene.ScreenshotSetter
    SceneUpdatePostHookExecutor SceneUpdatePostHookExecutor
}

@@ -216,7 +215,7 @@ func (t *SceneIdentifier) modifyScene(ctx context.Context, txnManager txn.Manage
        return nil
    }

    if _, err := updater.Update(ctx, t.SceneReaderUpdater, t.ScreenshotSetter); err != nil {
    if _, err := updater.Update(ctx, t.SceneReaderUpdater); err != nil {
        return fmt.Errorf("error updating scene: %w", err)
    }
@@ -31,12 +31,15 @@ const (
    BackupDirectoryPath = "backup_directory_path"
    Generated           = "generated"
    Metadata            = "metadata"
    BlobsPath           = "blobs_path"
    Downloads           = "downloads"
    ApiKey              = "api_key"
    Username            = "username"
    Password            = "password"
    MaxSessionAge       = "max_session_age"

    BlobsStorage = "blobs_storage"

    DefaultMaxSessionAge = 60 * 60 * 1 // 1 hours

    Database = "database"

@@ -551,6 +554,22 @@ func (i *Instance) GetGeneratedPath() string {
    return i.getString(Generated)
}

func (i *Instance) GetBlobsPath() string {
    return i.getString(BlobsPath)
}

func (i *Instance) GetBlobsStorage() BlobsStorageType {
    ret := BlobsStorageType(i.getString(BlobsStorage))

    if !ret.IsValid() {
        // default to database storage
        // for legacy systems this is probably the safer option
        ret = BlobStorageTypeDatabase
    }

    return ret
}

func (i *Instance) GetMetadataPath() string {
    return i.getString(Metadata)
}

@@ -1458,6 +1477,12 @@ func (i *Instance) Validate() error {
        }
    }

    if i.GetBlobsStorage() == BlobStorageTypeFilesystem && i.viper(BlobsPath).GetString(BlobsPath) == "" {
        return MissingConfigError{
            missingFields: []string{BlobsPath},
        }
    }

    return nil
}
internal/manager/config/enums.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package config

import (
    "fmt"
    "io"
    "strconv"
)

type BlobsStorageType string

const (
    // Database
    BlobStorageTypeDatabase BlobsStorageType = "DATABASE"
    // Filesystem
    BlobStorageTypeFilesystem BlobsStorageType = "FILESYSTEM"
)

var AllBlobStorageType = []BlobsStorageType{
    BlobStorageTypeDatabase,
    BlobStorageTypeFilesystem,
}

func (e BlobsStorageType) IsValid() bool {
    switch e {
    case BlobStorageTypeDatabase, BlobStorageTypeFilesystem:
        return true
    }
    return false
}

func (e BlobsStorageType) String() string {
    return string(e)
}

func (e *BlobsStorageType) UnmarshalGQL(v interface{}) error {
    str, ok := v.(string)
    if !ok {
        return fmt.Errorf("enums must be strings")
    }

    *e = BlobsStorageType(str)
    if !e.IsValid() {
        return fmt.Errorf("%s is not a valid BlobStorageType", str)
    }
    return nil
}

func (e BlobsStorageType) MarshalGQL(w io.Writer) {
    fmt.Fprint(w, strconv.Quote(e.String()))
}
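The enum implements the marshal/unmarshal pair that gqlgen expects for a custom model by hand. An illustrative test sketch (not part of the commit) of how the two halves behave, exercising only the functions defined above:

package config

import (
    "bytes"
    "testing"
)

func TestBlobsStorageTypeRoundTrip(t *testing.T) {
    var st BlobsStorageType
    // UnmarshalGQL rejects anything outside the declared enum values.
    if err := st.UnmarshalGQL("FILESYSTEM"); err != nil {
        t.Fatal(err)
    }

    // MarshalGQL serializes the value as a quoted string.
    var buf bytes.Buffer
    st.MarshalGQL(&buf)
    if buf.String() != `"FILESYSTEM"` {
        t.Fatalf("unexpected serialization: %s", buf.String())
    }

    // Unknown values error out rather than being stored silently.
    if err := st.UnmarshalGQL("S3"); err == nil {
        t.Fatal("expected error for unknown storage type")
    }
}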
@@ -7,6 +7,8 @@ type ScanMetadataOptions struct {
    // Strip file extension from title
    // Deprecated: not implemented
    StripFileExtension bool `json:"stripFileExtension"`
    // Generate scene covers during scan
    ScanGenerateCovers bool `json:"scanGenerateCovers"`
    // Generate previews during scan
    ScanGeneratePreviews bool `json:"scanGeneratePreviews"`
    // Generate image previews during scan
@@ -26,11 +26,9 @@ import (
    "github.com/stashapp/stash/pkg/image"
    "github.com/stashapp/stash/pkg/job"
    "github.com/stashapp/stash/pkg/logger"
    "github.com/stashapp/stash/pkg/models"
    "github.com/stashapp/stash/pkg/models/paths"
    "github.com/stashapp/stash/pkg/plugin"
    "github.com/stashapp/stash/pkg/scene"
    "github.com/stashapp/stash/pkg/scene/generate"
    "github.com/stashapp/stash/pkg/scraper"
    "github.com/stashapp/stash/pkg/session"
    "github.com/stashapp/stash/pkg/sqlite"

@@ -102,6 +100,8 @@ type SetupInput struct {
    GeneratedLocation string `json:"generatedLocation"`
    // Empty to indicate default
    CacheLocation string `json:"cacheLocation"`
    // Empty to indicate database storage for blobs
    BlobsLocation string `json:"blobsLocation"`
}

type Manager struct {

@@ -290,20 +290,6 @@ func galleryFileFilter(ctx context.Context, f file.File) bool {
    return isZip(f.Base().Basename)
}

type coverGenerator struct {
}

func (g *coverGenerator) GenerateCover(ctx context.Context, scene *models.Scene, f *file.VideoFile) error {
    gg := generate.Generator{
        Encoder:      instance.FFMPEG,
        FFMpegConfig: instance.Config,
        LockManager:  instance.ReadLockManager,
        ScenePaths:   instance.Paths.Scene,
    }

    return gg.Screenshot(ctx, f.Path, scene.GetHash(instance.Config.GetVideoFileNamingAlgorithm()), f.Width, f.Duration, generate.ScreenshotOptions{})
}

func makeScanner(db *sqlite.Database, pluginCache *plugin.Cache) *file.Scanner {
    return &file.Scanner{
        Repository: file.Repository{

@@ -458,7 +444,7 @@ func (s *Manager) PostInit(ctx context.Context) error {
        logger.Warnf("could not set initial configuration: %v", err)
    }

    *s.Paths = paths.NewPaths(s.Config.GetGeneratedPath())
    *s.Paths = paths.NewPaths(s.Config.GetGeneratedPath(), s.Config.GetBlobsPath())
    s.RefreshConfig()
    s.SessionStore = session.NewStore(s.Config)
    s.PluginCache.RegisterSessionStore(s.SessionStore)

@@ -467,6 +453,8 @@ func (s *Manager) PostInit(ctx context.Context) error {
        logger.Errorf("Error reading plugin configs: %s", err.Error())
    }

    s.SetBlobStoreOptions()

    s.ScraperCache = instance.initScraperCache()
    writeStashIcon()

@@ -509,6 +497,17 @@ func (s *Manager) PostInit(ctx context.Context) error {
    return nil
}

func (s *Manager) SetBlobStoreOptions() {
    storageType := s.Config.GetBlobsStorage()
    blobsPath := s.Config.GetBlobsPath()

    s.Database.SetBlobStoreOptions(sqlite.BlobStoreOptions{
        UseFilesystem: storageType == config.BlobStorageTypeFilesystem,
        UseDatabase:   storageType == config.BlobStorageTypeDatabase,
        Path:          blobsPath,
    })
}

func writeStashIcon() {
    p := FaviconProvider{
        UIBox: ui.UIBox,

@@ -540,7 +539,7 @@ func (s *Manager) initScraperCache() *scraper.Cache {
}

func (s *Manager) RefreshConfig() {
    *s.Paths = paths.NewPaths(s.Config.GetGeneratedPath())
    *s.Paths = paths.NewPaths(s.Config.GetGeneratedPath(), s.Config.GetBlobsPath())
    config := s.Config
    if config.Validate() == nil {
        if err := fsutil.EnsureDir(s.Paths.Generated.Screenshots); err != nil {

@@ -617,7 +616,7 @@ func (s *Manager) Setup(ctx context.Context, input SetupInput) error {
    configDir := filepath.Dir(configFile)

    if exists, _ := fsutil.DirExists(configDir); !exists {
        if err := os.Mkdir(configDir, 0755); err != nil {
        if err := os.MkdirAll(configDir, 0755); err != nil {
            return fmt.Errorf("error creating config directory: %v", err)
        }
    }

@@ -632,7 +631,7 @@ func (s *Manager) Setup(ctx context.Context, input SetupInput) error {
    // create the generated directory if it does not exist
    if !c.HasOverride(config.Generated) {
        if exists, _ := fsutil.DirExists(input.GeneratedLocation); !exists {
            if err := os.Mkdir(input.GeneratedLocation, 0755); err != nil {
            if err := os.MkdirAll(input.GeneratedLocation, 0755); err != nil {
                return fmt.Errorf("error creating generated directory: %v", err)
            }
        }

@@ -643,7 +642,7 @@ func (s *Manager) Setup(ctx context.Context, input SetupInput) error {
    // create the cache directory if it does not exist
    if !c.HasOverride(config.Cache) {
        if exists, _ := fsutil.DirExists(input.CacheLocation); !exists {
            if err := os.Mkdir(input.CacheLocation, 0755); err != nil {
            if err := os.MkdirAll(input.CacheLocation, 0755); err != nil {
                return fmt.Errorf("error creating cache directory: %v", err)
            }
        }

@@ -651,6 +650,22 @@ func (s *Manager) Setup(ctx context.Context, input SetupInput) error {
        s.Config.Set(config.Cache, input.CacheLocation)
    }

    // if blobs path was provided then use filesystem based blob storage
    if input.BlobsLocation != "" {
        if !c.HasOverride(config.BlobsPath) {
            if exists, _ := fsutil.DirExists(input.BlobsLocation); !exists {
                if err := os.MkdirAll(input.BlobsLocation, 0755); err != nil {
                    return fmt.Errorf("error creating blobs directory: %v", err)
                }
            }
        }

        s.Config.Set(config.BlobsPath, input.BlobsLocation)
        s.Config.Set(config.BlobsStorage, config.BlobStorageTypeFilesystem)
    } else {
        s.Config.Set(config.BlobsStorage, config.BlobStorageTypeDatabase)
    }

    // set the configuration
    if !c.HasOverride(config.Database) {
        s.Config.Set(config.Database, input.DatabaseFile)
@@ -194,11 +194,11 @@ func (s *Manager) generateScreenshot(ctx context.Context, sceneId string, at *fl
        return
    }

    task := GenerateScreenshotTask{
        txnManager:          s.Repository,
        Scene:               *scene,
        ScreenshotAt:        at,
        fileNamingAlgorithm: config.GetInstance().GetVideoFileNamingAlgorithm(),
    task := GenerateCoverTask{
        txnManager:   s.Repository,
        Scene:        *scene,
        ScreenshotAt: at,
        Overwrite:    true,
    }

    task.Start(ctx)

@@ -102,8 +102,6 @@ type SceneService interface {
    AssignFile(ctx context.Context, sceneID int, fileID file.ID) error
    Merge(ctx context.Context, sourceIDs []int, destinationID int, values models.ScenePartial) error
    Destroy(ctx context.Context, scene *models.Scene, fileDeleter *scene.FileDeleter, deleteGenerated, deleteFile bool) error

    GetCover(ctx context.Context, scene *models.Scene) ([]byte, error)
}

type ImageService interface {
@@ -54,32 +54,32 @@ func (s *SceneServer) StreamSceneDirect(scene *models.Scene, w http.ResponseWrit
func (s *SceneServer) ServeScreenshot(scene *models.Scene, w http.ResponseWriter, r *http.Request) {
    const defaultSceneImage = "scene/scene.svg"

    if scene.Path != "" {
        filepath := GetInstance().Paths.Scene.GetScreenshotPath(scene.GetHash(config.GetInstance().GetVideoFileNamingAlgorithm()))

        // fall back to the scene image blob if the file isn't present
        screenshotExists, _ := fsutil.FileExists(filepath)
        if screenshotExists {
            http.ServeFile(w, r, filepath)
            return
        }
    }

    var cover []byte
    readTxnErr := txn.WithReadTxn(r.Context(), s.TxnManager, func(ctx context.Context) error {
        cover, _ = s.SceneCoverGetter.GetCover(ctx, scene.ID)
        return nil
        var err error
        cover, err = s.SceneCoverGetter.GetCover(ctx, scene.ID)
        return err
    })
    if errors.Is(readTxnErr, context.Canceled) {
        return
    }
    if readTxnErr != nil {
        logger.Warnf("read transaction error on fetch screenshot: %v", readTxnErr)
        http.Error(w, readTxnErr.Error(), http.StatusInternalServerError)
        return
    }

    if cover == nil {
        // fallback to legacy image if present
        if scene.Path != "" {
            filepath := GetInstance().Paths.Scene.GetLegacyScreenshotPath(scene.GetHash(config.GetInstance().GetVideoFileNamingAlgorithm()))

            // fall back to the scene image blob if the file isn't present
            screenshotExists, _ := fsutil.FileExists(filepath)
            if screenshotExists {
                http.ServeFile(w, r, filepath)
                return
            }
        }

        // fallback to default cover if none found
        // should always be there
        f, _ := static.Scene.Open(defaultSceneImage)
internal/manager/task/migrate_blobs.go (new file, 129 lines)
@@ -0,0 +1,129 @@
package task

import (
    "context"
    "fmt"

    "github.com/stashapp/stash/pkg/job"
    "github.com/stashapp/stash/pkg/logger"
    "github.com/stashapp/stash/pkg/txn"
)

type BlobStoreMigrator interface {
    Count(ctx context.Context) (int, error)
    FindBlobs(ctx context.Context, n uint, lastChecksum string) ([]string, error)
    MigrateBlob(ctx context.Context, checksum string, deleteOld bool) error
}

type Vacuumer interface {
    Vacuum(ctx context.Context) error
}

type MigrateBlobsJob struct {
    TxnManager txn.Manager
    BlobStore  BlobStoreMigrator
    Vacuumer   Vacuumer
    DeleteOld  bool
}

func (j *MigrateBlobsJob) Execute(ctx context.Context, progress *job.Progress) {
    var (
        count int
        err   error
    )
    progress.ExecuteTask("Counting blobs", func() {
        count, err = j.countBlobs(ctx)
        progress.SetTotal(count)
    })

    if err != nil {
        logger.Errorf("Error counting blobs: %s", err.Error())
        return
    }

    if count == 0 {
        logger.Infof("No blobs to migrate")
        return
    }

    logger.Infof("Migrating %d blobs", count)

    progress.ExecuteTask(fmt.Sprintf("Migrating %d blobs", count), func() {
        err = j.migrateBlobs(ctx, progress)
    })

    if job.IsCancelled(ctx) {
        logger.Info("Cancelled migrating blobs")
        return
    }

    if err != nil {
        logger.Errorf("Error migrating blobs: %v", err)
        return
    }

    // run a vacuum to reclaim space
    progress.ExecuteTask("Vacuuming database", func() {
        err = j.Vacuumer.Vacuum(ctx)
        if err != nil {
            logger.Errorf("Error vacuuming database: %v", err)
        }
    })

    logger.Infof("Finished migrating blobs")
}

func (j *MigrateBlobsJob) countBlobs(ctx context.Context) (int, error) {
    var count int
    if err := txn.WithReadTxn(ctx, j.TxnManager, func(ctx context.Context) error {
        var err error
        count, err = j.BlobStore.Count(ctx)
        return err
    }); err != nil {
        return 0, err
    }

    return count, nil
}

func (j *MigrateBlobsJob) migrateBlobs(ctx context.Context, progress *job.Progress) error {
    lastChecksum := ""
    batch, err := j.getBatch(ctx, lastChecksum)

    for len(batch) > 0 && err == nil && ctx.Err() == nil {
        for _, checksum := range batch {
            if ctx.Err() != nil {
                return nil
            }

            lastChecksum = checksum

            progress.ExecuteTask("Migrating blob "+checksum, func() {
                defer progress.Increment()

                if err := txn.WithTxn(ctx, j.TxnManager, func(ctx context.Context) error {
                    return j.BlobStore.MigrateBlob(ctx, checksum, j.DeleteOld)
                }); err != nil {
                    logger.Errorf("Error migrating blob %s: %v", checksum, err)
                }
            })
        }

        batch, err = j.getBatch(ctx, lastChecksum)
    }

    return err
}

func (j *MigrateBlobsJob) getBatch(ctx context.Context, lastChecksum string) ([]string, error) {
    const batchSize = 1000

    var batch []string
    err := txn.WithReadTxn(ctx, j.TxnManager, func(ctx context.Context) error {
        var err error
        batch, err = j.BlobStore.FindBlobs(ctx, batchSize, lastChecksum)
        return err
    })

    return batch, err
}
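The batching above is keyset pagination: each FindBlobs call is assumed to return checksums strictly after lastChecksum in a stable ascending order, so the loop makes progress across transactions without OFFSET bookkeeping and terminates when a batch comes back empty. A hypothetical in-memory sketch of that contract (an illustrative mock, not the sqlite implementation):

package main

import (
    "context"
    "fmt"
    "sort"
)

// mockBlobStore is a hypothetical in-memory stand-in for BlobStoreMigrator's
// paging method, to illustrate the contract the job's loop relies on.
type mockBlobStore struct {
    checksums []string // sorted ascending, like an indexed checksum column
}

// FindBlobs returns up to n checksums strictly greater than lastChecksum,
// in ascending order.
func (m *mockBlobStore) FindBlobs(ctx context.Context, n uint, lastChecksum string) ([]string, error) {
    i := sort.SearchStrings(m.checksums, lastChecksum)
    if i < len(m.checksums) && m.checksums[i] == lastChecksum {
        i++ // resume strictly after the last processed checksum
    }
    end := i + int(n)
    if end > len(m.checksums) {
        end = len(m.checksums)
    }
    return m.checksums[i:end], nil
}

func main() {
    store := &mockBlobStore{checksums: []string{"0a", "1b", "2c", "3d", "4e"}}

    last := ""
    batch, _ := store.FindBlobs(context.Background(), 2, last)
    for len(batch) > 0 {
        for _, c := range batch {
            last = c
            fmt.Println("migrate", c) // stands in for MigrateBlob in its own txn
        }
        batch, _ = store.FindBlobs(context.Background(), 2, last)
    }
}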
internal/manager/task/migrate_scene_screenshots.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package task

import (
    "context"
    "errors"
    "io"
    "os"
    "path/filepath"
    "strings"

    "github.com/stashapp/stash/pkg/job"
    "github.com/stashapp/stash/pkg/logger"
    "github.com/stashapp/stash/pkg/scene"
    "github.com/stashapp/stash/pkg/txn"
)

type MigrateSceneScreenshotsJob struct {
    ScreenshotsPath string
    Input           scene.MigrateSceneScreenshotsInput
    SceneRepo       scene.HashFinderCoverUpdater
    TxnManager      txn.Manager
}

func (j *MigrateSceneScreenshotsJob) Execute(ctx context.Context, progress *job.Progress) {
    var err error
    progress.ExecuteTask("Counting files", func() {
        var count int
        count, err = j.countFiles(ctx)
        progress.SetTotal(count)
    })

    if err != nil {
        logger.Errorf("Error counting files: %s", err.Error())
        return
    }

    progress.ExecuteTask("Migrating files", func() {
        err = j.migrateFiles(ctx, progress)
    })

    if job.IsCancelled(ctx) {
        logger.Info("Cancelled migrating scene screenshots")
        return
    }

    if err != nil {
        logger.Errorf("Error migrating scene screenshots: %v", err)
        return
    }

    logger.Infof("Finished migrating scene screenshots")
}

func (j *MigrateSceneScreenshotsJob) countFiles(ctx context.Context) (int, error) {
    f, err := os.Open(j.ScreenshotsPath)
    if err != nil {
        return 0, err
    }
    defer f.Close()

    const batchSize = 1000
    ret := 0
    files, err := f.ReadDir(batchSize)
    for err == nil && ctx.Err() == nil {
        ret += len(files)

        files, err = f.ReadDir(batchSize)
    }

    if errors.Is(err, io.EOF) {
        // end of directory
        return ret, nil
    }

    return 0, err
}

func (j *MigrateSceneScreenshotsJob) migrateFiles(ctx context.Context, progress *job.Progress) error {
    f, err := os.Open(j.ScreenshotsPath)
    if err != nil {
        return err
    }
    defer f.Close()

    m := scene.ScreenshotMigrator{
        Options:      j.Input,
        SceneUpdater: j.SceneRepo,
        TxnManager:   j.TxnManager,
    }

    const batchSize = 1000
    files, err := f.ReadDir(batchSize)
    for err == nil && ctx.Err() == nil {
        for _, f := range files {
            if ctx.Err() != nil {
                return nil
            }

            progress.ExecuteTask("Migrating file "+f.Name(), func() {
                defer progress.Increment()

                path := filepath.Join(j.ScreenshotsPath, f.Name())

                // sanity check - only process files
                if f.IsDir() {
                    logger.Warnf("Skipping directory %s", path)
                    return
                }

                // ignore non-jpg files
                if !strings.HasSuffix(f.Name(), ".jpg") {
                    return
                }

                // ignore .thumb files
                if strings.HasSuffix(f.Name(), ".thumb.jpg") {
                    return
                }

                if err := m.MigrateScreenshots(ctx, path); err != nil {
                    logger.Errorf("Error migrating screenshots for %s: %v", path, err)
                }
            })
        }

        files, err = f.ReadDir(batchSize)
    }

    if errors.Is(err, io.EOF) {
        // end of directory
        return nil
    }

    return err
}
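The job walks the screenshots directory in ReadDir batches of 1000 rather than loading every entry at once, so very large legacy directories don't spike memory. Legacy screenshots are named by scene hash, which is presumably how ScreenshotMigrator maps a file back to its scene; a hypothetical sketch of that filename-to-hash step:

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

func main() {
    // Hypothetical: a legacy screenshot file named by the scene's hash.
    path := "/stash/generated/screenshots/3f2a9c.jpg"

    base := filepath.Base(path)                          // "3f2a9c.jpg"
    hash := strings.TrimSuffix(base, filepath.Ext(base)) // "3f2a9c"

    // The migrator can then look scenes up by this hash (oshash or MD5,
    // depending on the configured naming algorithm) and store the image
    // bytes as the scene's cover blob.
    fmt.Println(hash)
}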
@@ -13,28 +13,28 @@ import (
    "github.com/stashapp/stash/pkg/scene"
    "github.com/stashapp/stash/pkg/scene/generate"
    "github.com/stashapp/stash/pkg/sliceutil/stringslice"
    "github.com/stashapp/stash/pkg/utils"
)

type GenerateMetadataInput struct {
    Sprites       *bool `json:"sprites"`
    Previews      *bool `json:"previews"`
    ImagePreviews *bool `json:"imagePreviews"`
    Covers        bool `json:"covers"`
    Sprites       bool `json:"sprites"`
    Previews      bool `json:"previews"`
    ImagePreviews bool `json:"imagePreviews"`
    PreviewOptions *GeneratePreviewOptionsInput `json:"previewOptions"`
    Markers             *bool `json:"markers"`
    MarkerImagePreviews *bool `json:"markerImagePreviews"`
    MarkerScreenshots   *bool `json:"markerScreenshots"`
    Transcodes          *bool `json:"transcodes"`
    Markers             bool `json:"markers"`
    MarkerImagePreviews bool `json:"markerImagePreviews"`
    MarkerScreenshots   bool `json:"markerScreenshots"`
    Transcodes          bool `json:"transcodes"`
    // Generate transcodes even if not required
    ForceTranscodes           *bool `json:"forceTranscodes"`
    Phashes                   *bool `json:"phashes"`
    InteractiveHeatmapsSpeeds *bool `json:"interactiveHeatmapsSpeeds"`
    ForceTranscodes           bool `json:"forceTranscodes"`
    Phashes                   bool `json:"phashes"`
    InteractiveHeatmapsSpeeds bool `json:"interactiveHeatmapsSpeeds"`
    // scene ids to generate for
    SceneIDs []string `json:"sceneIDs"`
    // marker ids to generate for
    MarkerIDs []string `json:"markerIDs"`
    // overwrite existing media
    Overwrite *bool `json:"overwrite"`
    Overwrite bool `json:"overwrite"`
}

type GeneratePreviewOptionsInput struct {

@@ -61,6 +61,7 @@ type GenerateJob struct {
}

type totalsGenerate struct {
    covers        int64
    sprites       int64
    previews      int64
    imagePreviews int64

@@ -77,9 +78,7 @@ func (j *GenerateJob) Execute(ctx context.Context, progress *job.Progress) {
    var err error
    var markers []*models.SceneMarker

    if j.input.Overwrite != nil {
        j.overwrite = *j.input.Overwrite
    }
    j.overwrite = j.input.Overwrite
    j.fileNamingAlgo = config.GetInstance().GetVideoFileNamingAlgorithm()

    config := config.GetInstance()

@@ -143,7 +142,7 @@ func (j *GenerateJob) Execute(ctx context.Context, progress *job.Progress) {
        return
    }

    logger.Infof("Generating %d sprites %d previews %d image previews %d markers %d transcodes %d phashes %d heatmaps & speeds", totals.sprites, totals.previews, totals.imagePreviews, totals.markers, totals.transcodes, totals.phashes, totals.interactiveHeatmapSpeeds)
    logger.Infof("Generating %d covers %d sprites %d previews %d image previews %d markers %d transcodes %d phashes %d heatmaps & speeds", totals.covers, totals.sprites, totals.previews, totals.imagePreviews, totals.markers, totals.transcodes, totals.phashes, totals.interactiveHeatmapSpeeds)

    progress.SetTotal(int(totals.tasks))
}()

@@ -266,7 +265,20 @@ func getGeneratePreviewOptions(optionsInput GeneratePreviewOptionsInput) generat
}

func (j *GenerateJob) queueSceneJobs(ctx context.Context, g *generate.Generator, scene *models.Scene, queue chan<- Task, totals *totalsGenerate) {
    if utils.IsTrue(j.input.Sprites) {
    if j.input.Covers {
        task := &GenerateCoverTask{
            txnManager: j.txnManager,
            Scene:      *scene,
        }

        if j.overwrite || task.required(ctx) {
            totals.covers++
            totals.tasks++
            queue <- task
        }
    }

    if j.input.Sprites {
        task := &GenerateSpriteTask{
            Scene:     *scene,
            Overwrite: j.overwrite,

@@ -286,10 +298,10 @@ func (j *GenerateJob) queueSceneJobs(ctx context.Context, g *generate.Generator,
    }
    options := getGeneratePreviewOptions(*generatePreviewOptions)

    if utils.IsTrue(j.input.Previews) {
    if j.input.Previews {
        task := &GeneratePreviewTask{
            Scene:               *scene,
            ImagePreview:        utils.IsTrue(j.input.ImagePreviews),
            ImagePreview:        j.input.ImagePreviews,
            Options:             options,
            Overwrite:           j.overwrite,
            fileNamingAlgorithm: j.fileNamingAlgo,

@@ -303,7 +315,7 @@ func (j *GenerateJob) queueSceneJobs(ctx context.Context, g *generate.Generator,
            addTask = true
        }

        if utils.IsTrue(j.input.ImagePreviews) && (j.overwrite || !task.doesImagePreviewExist()) {
        if j.input.ImagePreviews && (j.overwrite || !task.doesImagePreviewExist()) {
            totals.imagePreviews++
            addTask = true
        }

@@ -315,14 +327,14 @@ func (j *GenerateJob) queueSceneJobs(ctx context.Context, g *generate.Generator,
        }
    }

    if utils.IsTrue(j.input.Markers) {
    if j.input.Markers {
        task := &GenerateMarkersTask{
            TxnManager:          j.txnManager,
            Scene:               scene,
            Overwrite:           j.overwrite,
            fileNamingAlgorithm: j.fileNamingAlgo,
            ImagePreview:        utils.IsTrue(j.input.MarkerImagePreviews),
            Screenshot:          utils.IsTrue(j.input.MarkerScreenshots),
            ImagePreview:        j.input.MarkerImagePreviews,
            Screenshot:          j.input.MarkerScreenshots,

            generator: g,
        }

@@ -336,8 +348,8 @@ func (j *GenerateJob) queueSceneJobs(ctx context.Context, g *generate.Generator,
        }
    }

    if utils.IsTrue(j.input.Transcodes) {
        forceTranscode := utils.IsTrue(j.input.ForceTranscodes)
    if j.input.Transcodes {
        forceTranscode := j.input.ForceTranscodes
        task := &GenerateTranscodeTask{
            Scene:     *scene,
            Overwrite: j.overwrite,

@@ -352,7 +364,7 @@ func (j *GenerateJob) queueSceneJobs(ctx context.Context, g *generate.Generator,
        }
    }

    if utils.IsTrue(j.input.Phashes) {
    if j.input.Phashes {
        // generate for all files in scene
        for _, f := range scene.Files.List() {
            task := &GeneratePhashTask{

@@ -371,7 +383,7 @@ func (j *GenerateJob) queueSceneJobs(ctx context.Context, g *generate.Generator,
        }
    }

    if utils.IsTrue(j.input.InteractiveHeatmapsSpeeds) {
    if j.input.InteractiveHeatmapsSpeeds {
        task := &GenerateInteractiveHeatmapSpeedTask{
            Scene:     *scene,
            Overwrite: j.overwrite,
@@ -3,25 +3,32 @@ package manager
import (
    "context"
    "fmt"
    "io"
    "os"

    "github.com/stashapp/stash/pkg/logger"
    "github.com/stashapp/stash/pkg/models"
    "github.com/stashapp/stash/pkg/scene"
    "github.com/stashapp/stash/pkg/scene/generate"
)

type GenerateScreenshotTask struct {
    Scene               models.Scene
    ScreenshotAt        *float64
    fileNamingAlgorithm models.HashAlgorithm
    txnManager          Repository
type GenerateCoverTask struct {
    Scene        models.Scene
    ScreenshotAt *float64
    txnManager   Repository
    Overwrite    bool
}

func (t *GenerateScreenshotTask) Start(ctx context.Context) {
func (t *GenerateCoverTask) GetDescription() string {
    return fmt.Sprintf("Generating cover for %s", t.Scene.GetTitle())
}

func (t *GenerateCoverTask) Start(ctx context.Context) {
    scenePath := t.Scene.Path

    if err := t.txnManager.WithReadTxn(ctx, func(ctx context.Context) error {
        return t.Scene.LoadPrimaryFile(ctx, t.txnManager.File)
    }); err != nil {
        logger.Error(err)
    }

    videoFile := t.Scene.Files.Primary()
    if videoFile == nil {
        return

@@ -34,12 +41,8 @@ func (t *GenerateScreenshotTask) Start(ctx context.Context) {
        at = *t.ScreenshotAt
    }

    checksum := t.Scene.GetHash(t.fileNamingAlgorithm)
    normalPath := instance.Paths.Scene.GetScreenshotPath(checksum)

    // we'll generate the screenshot, grab the generated data and set it
    // in the database. We'll use SetSceneScreenshot to set the data
    // which also generates the thumbnail
    // in the database.

    logger.Debugf("Creating screenshot for %s", scenePath)

@@ -51,35 +54,19 @@ func (t *GenerateScreenshotTask) Start(ctx context.Context) {
        Overwrite: true,
    }

    if err := g.Screenshot(context.TODO(), videoFile.Path, checksum, videoFile.Width, videoFile.Duration, generate.ScreenshotOptions{
    coverImageData, err := g.Screenshot(context.TODO(), videoFile.Path, videoFile.Width, videoFile.Duration, generate.ScreenshotOptions{
        At: &at,
    }); err != nil {
    })
    if err != nil {
        logger.Errorf("Error generating screenshot: %v", err)
        logErrorOutput(err)
        return
    }

    f, err := os.Open(normalPath)
    if err != nil {
        logger.Errorf("Error reading screenshot: %s", err.Error())
        return
    }
    defer f.Close()

    coverImageData, err := io.ReadAll(f)
    if err != nil {
        logger.Errorf("Error reading screenshot: %s", err.Error())
        return
    }

    if err := t.txnManager.WithTxn(ctx, func(ctx context.Context) error {
        qb := t.txnManager.Scene
        updatedScene := models.NewScenePartial()

        if err := scene.SetScreenshot(instance.Paths, checksum, coverImageData); err != nil {
            return fmt.Errorf("error writing screenshot: %v", err)
        }

        // update the scene cover table
        if err := qb.UpdateCover(ctx, t.Scene.ID, coverImageData); err != nil {
            return fmt.Errorf("error setting screenshot: %v", err)

@@ -96,3 +83,19 @@ func (t *GenerateScreenshotTask) Start(ctx context.Context) {
        logger.Error(err.Error())
    }
}

// required returns true if the sprite needs to be generated
func (t GenerateCoverTask) required(ctx context.Context) bool {
    if t.Overwrite {
        return true
    }

    // if the scene has a cover, then we don't need to generate it
    hasCover, err := t.txnManager.Scene.HasCover(ctx, t.Scene.ID)
    if err != nil {
        logger.Errorf("Error getting cover: %v", err)
        return false
    }

    return !hasCover
}
@@ -138,12 +138,8 @@ func (j *IdentifyJob) identifyScene(ctx context.Context, s *models.Scene, source
        PerformerCreator: instance.Repository.Performer,
        TagCreator:       instance.Repository.Tag,

        DefaultOptions: j.input.Options,
        Sources:        sources,
        ScreenshotSetter: &scene.PathsCoverSetter{
            Paths:               instance.Paths,
            FileNamingAlgorithm: instance.Config.GetVideoFileNamingAlgorithm(),
        },
        DefaultOptions:              j.input.Options,
        Sources:                     sources,
        SceneUpdatePostHookExecutor: j.postHookExecutor,
    }
@@ -194,22 +194,22 @@ func (f *handlerRequiredFilter) Accept(ctx context.Context, ff file.File) bool {
    }

    if isVideoFile {
        // check if the screenshot file exists
        hash := scene.GetHash(ff, f.videoFileNamingAlgorithm)
        ssPath := instance.Paths.Scene.GetScreenshotPath(hash)
        if exists, _ := fsutil.FileExists(ssPath); !exists {
            // if not, check if the file is a primary file for a scene
            scenes, err := f.SceneFinder.FindByPrimaryFileID(ctx, ff.Base().ID)
            if err != nil {
                // just ignore
                return false
            }
        // TODO - check if the cover exists
        // hash := scene.GetHash(ff, f.videoFileNamingAlgorithm)
        // ssPath := instance.Paths.Scene.GetScreenshotPath(hash)
        // if exists, _ := fsutil.FileExists(ssPath); !exists {
        //     // if not, check if the file is a primary file for a scene
        //     scenes, err := f.SceneFinder.FindByPrimaryFileID(ctx, ff.Base().ID)
        //     if err != nil {
        //         // just ignore
        //         return false
        //     }

            if len(scenes) > 0 {
                // if it is, then it needs to be re-generated
                return true
            }
        }
        //     if len(scenes) > 0 {
        //         // if it is, then it needs to be re-generated
        //         return true
        //     }
        // }

        // clean captions - scene handler handles this as well, but
        // unchanged files aren't processed by the scene handler
@@ -349,7 +349,6 @@ func getScanHandlers(options ScanMetadataInput, taskQueue *job.TaskQueue, progre
    CreatorUpdater: db.Scene,
    PluginCache:    pluginCache,
    CaptionUpdater: db.File,
    CoverGenerator: &coverGenerator{},
    ScanGenerator: &sceneGenerators{
        input:     options,
        taskQueue: taskQueue,

@@ -485,5 +484,17 @@ func (g *sceneGenerators) Generate(ctx context.Context, s *models.Scene, f *file
        }
    }

    if t.ScanGenerateCovers {
        progress.AddTotal(1)
        g.taskQueue.Add(fmt.Sprintf("Generating cover for %s", path), func(ctx context.Context) {
            taskCover := GenerateCoverTask{
                Scene:      *s,
                txnManager: instance.Repository,
            }
            taskCover.Start(ctx)
            progress.Increment()
        })
    }

    return nil
}
@@ -380,7 +380,7 @@ func (j *cleanJob) deleteFile(ctx context.Context, fileID ID, fn string) {
    // delete associated objects
    fileDeleter := NewDeleter()
    if err := txn.WithTxn(ctx, j.Repository, func(ctx context.Context) error {
        fileDeleter.RegisterHooks(ctx, j.Repository)
        fileDeleter.RegisterHooks(ctx)

        if err := j.fireHandlers(ctx, fileDeleter, fileID); err != nil {
            return err

@@ -397,7 +397,7 @@ func (j *cleanJob) deleteFolder(ctx context.Context, folderID FolderID, fn strin
    // delete associated objects
    fileDeleter := NewDeleter()
    if err := txn.WithTxn(ctx, j.Repository, func(ctx context.Context) error {
        fileDeleter.RegisterHooks(ctx, j.Repository)
        fileDeleter.RegisterHooks(ctx)

        if err := j.fireFolderHandlers(ctx, fileDeleter, folderID); err != nil {
            return err
@@ -69,15 +69,13 @@ func NewDeleter() *Deleter {
}

// RegisterHooks registers post-commit and post-rollback hooks.
func (d *Deleter) RegisterHooks(ctx context.Context, mgr txn.Manager) {
    txn.AddPostCommitHook(ctx, func(ctx context.Context) error {
func (d *Deleter) RegisterHooks(ctx context.Context) {
    txn.AddPostCommitHook(ctx, func(ctx context.Context) {
        d.Commit()
        return nil
    })

    txn.AddPostRollbackHook(ctx, func(ctx context.Context) error {
    txn.AddPostRollbackHook(ctx, func(ctx context.Context) {
        d.Rollback()
        return nil
    })
}
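Dropping the error return from the hook signature reflects that post-commit and post-rollback hooks run after the transaction's outcome is already decided, so they can only log, never fail the transaction. A simplified standalone model of the pattern (a hypothetical sketch, not the pkg/txn implementation):

package main

import "fmt"

// hooks collects callbacks to run after a transaction resolves.
// Post-commit hooks cannot return errors: the commit already happened.
type hooks struct {
    postCommit   []func()
    postRollback []func()
}

func (h *hooks) withTxn(do func() error) error {
    if err := do(); err != nil {
        for _, f := range h.postRollback {
            f() // cleanup, e.g. Deleter.Rollback restoring staged files
        }
        return err
    }
    for _, f := range h.postCommit {
        f() // e.g. Deleter.Commit physically removing files
    }
    return nil
}

func main() {
    h := &hooks{}
    h.postCommit = append(h.postCommit, func() { fmt.Println("commit: delete files") })
    h.postRollback = append(h.postRollback, func() { fmt.Println("rollback: restore files") })
    _ = h.withTxn(func() error { return nil }) // prints "commit: delete files"
}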
@@ -34,6 +34,26 @@ type FS interface {
// OsFS is a file system backed by the OS.
type OsFS struct{}

func (f *OsFS) Create(name string) (*os.File, error) {
    return os.Create(name)
}

func (f *OsFS) MkdirAll(path string, perm fs.FileMode) error {
    return os.MkdirAll(path, perm)
}

func (f *OsFS) Remove(name string) error {
    return os.Remove(name)
}

func (f *OsFS) Rename(oldpath, newpath string) error {
    return os.Rename(oldpath, newpath)
}

func (f *OsFS) RemoveAll(path string) error {
    return os.RemoveAll(path)
}

func (f *OsFS) Stat(name string) (fs.FileInfo, error) {
    return os.Stat(name)
}
@@ -508,12 +508,11 @@ func (s *scanJob) onNewFolder(ctx context.Context, file scanFile) (*Folder, erro
        }
    }

    txn.AddPostCommitHook(ctx, func(ctx context.Context) error {
    txn.AddPostCommitHook(ctx, func(ctx context.Context) {
        // log at the end so that if anything fails above due to a locked database
        // error and the transaction must be retried, then we shouldn't get multiple
        // logs of the same thing.
        logger.Infof("%s doesn't exist. Creating new folder entry...", file.Path)
        return nil
    })

    if err := s.Repository.FolderStore.Create(ctx, toCreate); err != nil {

@@ -143,15 +143,13 @@ func (h *ScanHandler) Handle(ctx context.Context, f file.File, oldFile file.File

    if h.ScanConfig.IsGenerateThumbnails() {
        // do this after the commit so that the transaction isn't held up
        txn.AddPostCommitHook(ctx, func(ctx context.Context) error {
        txn.AddPostCommitHook(ctx, func(ctx context.Context) {
            for _, s := range existing {
                if err := h.ThumbnailGenerator.GenerateThumbnail(ctx, s, imageFile); err != nil {
                    // just log if cover generation fails. We can try again on rescan
                    logger.Errorf("Error generating thumbnail for %s: %v", imageFile.Path, err)
                }
            }

            return nil
        })
    }
@@ -7,16 +7,17 @@ import (
)

type GenerateMetadataOptions struct {
    Sprites       *bool `json:"sprites"`
    Previews      *bool `json:"previews"`
    ImagePreviews *bool `json:"imagePreviews"`
    Covers        bool `json:"covers"`
    Sprites       bool `json:"sprites"`
    Previews      bool `json:"previews"`
    ImagePreviews bool `json:"imagePreviews"`
    PreviewOptions *GeneratePreviewOptions `json:"previewOptions"`
    Markers             *bool `json:"markers"`
    MarkerImagePreviews *bool `json:"markerImagePreviews"`
    MarkerScreenshots   *bool `json:"markerScreenshots"`
    Transcodes          *bool `json:"transcodes"`
    Phashes             *bool `json:"phashes"`
    InteractiveHeatmapsSpeeds *bool `json:"interactiveHeatmapsSpeeds"`
    Markers             bool `json:"markers"`
    MarkerImagePreviews bool `json:"markerImagePreviews"`
    MarkerScreenshots   bool `json:"markerScreenshots"`
    Transcodes          bool `json:"transcodes"`
    Phashes             bool `json:"phashes"`
    InteractiveHeatmapsSpeeds bool `json:"interactiveHeatmapsSpeeds"`
}

type GeneratePreviewOptions struct {
@@ -137,20 +137,6 @@ func (_m *MovieReaderWriter) Destroy(ctx context.Context, id int) error {
|
||||
return r0
|
||||
}
|
||||
|
||||
// DestroyImages provides a mock function with given fields: ctx, movieID
|
||||
func (_m *MovieReaderWriter) DestroyImages(ctx context.Context, movieID int) error {
|
||||
ret := _m.Called(ctx, movieID)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
|
||||
r0 = rf(ctx, movieID)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Find provides a mock function with given fields: ctx, id
|
||||
func (_m *MovieReaderWriter) Find(ctx context.Context, id int) (*models.Movie, error) {
|
||||
ret := _m.Called(ctx, id)
|
||||
@@ -388,6 +374,34 @@ func (_m *MovieReaderWriter) Update(ctx context.Context, updatedMovie models.Mov
	return r0, r1
}

// UpdateBackImage provides a mock function with given fields: ctx, movieID, backImage
func (_m *MovieReaderWriter) UpdateBackImage(ctx context.Context, movieID int, backImage []byte) error {
	ret := _m.Called(ctx, movieID, backImage)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok {
		r0 = rf(ctx, movieID, backImage)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// UpdateFrontImage provides a mock function with given fields: ctx, movieID, frontImage
func (_m *MovieReaderWriter) UpdateFrontImage(ctx context.Context, movieID int, frontImage []byte) error {
	ret := _m.Called(ctx, movieID, frontImage)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int, []byte) error); ok {
		r0 = rf(ctx, movieID, frontImage)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// UpdateFull provides a mock function with given fields: ctx, updatedMovie
func (_m *MovieReaderWriter) UpdateFull(ctx context.Context, updatedMovie models.Movie) (*models.Movie, error) {
	ret := _m.Called(ctx, updatedMovie)

@@ -410,17 +424,3 @@ func (_m *MovieReaderWriter) UpdateFull(ctx context.Context, updatedMovie models

	return r0, r1
}

// UpdateImages provides a mock function with given fields: ctx, movieID, frontImage, backImage
func (_m *MovieReaderWriter) UpdateImages(ctx context.Context, movieID int, frontImage []byte, backImage []byte) error {
	ret := _m.Called(ctx, movieID, frontImage, backImage)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int, []byte, []byte) error); ok {
		r0 = rf(ctx, movieID, frontImage, backImage)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

@@ -235,20 +235,6 @@ func (_m *SceneReaderWriter) Destroy(ctx context.Context, id int) error {
	return r0
}

// DestroyCover provides a mock function with given fields: ctx, sceneID
func (_m *SceneReaderWriter) DestroyCover(ctx context.Context, sceneID int) error {
	ret := _m.Called(ctx, sceneID)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
		r0 = rf(ctx, sceneID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Duration provides a mock function with given fields: ctx
func (_m *SceneReaderWriter) Duration(ctx context.Context) (float64, error) {
	ret := _m.Called(ctx)

@@ -638,6 +624,27 @@ func (_m *SceneReaderWriter) GetTagIDs(ctx context.Context, relatedID int) ([]in
	return r0, r1
}

// HasCover provides a mock function with given fields: ctx, sceneID
func (_m *SceneReaderWriter) HasCover(ctx context.Context, sceneID int) (bool, error) {
	ret := _m.Called(ctx, sceneID)

	var r0 bool
	if rf, ok := ret.Get(0).(func(context.Context, int) bool); ok {
		r0 = rf(ctx, sceneID)
	} else {
		r0 = ret.Get(0).(bool)
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, int) error); ok {
		r1 = rf(ctx, sceneID)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// IncrementOCounter provides a mock function with given fields: ctx, id
func (_m *SceneReaderWriter) IncrementOCounter(ctx context.Context, id int) (int, error) {
	ret := _m.Called(ctx, id)

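A sketch of stubbing the new HasCover mock in a test, following the testify conventions of the generated code above; the context and ID names are arbitrary:

    // Illustrative test wiring for the generated mock.
    db := &mocks.SceneReaderWriter{}
    db.On("HasCover", testCtx, sceneID).Return(false, nil).Once()

    hasCover, err := db.HasCover(testCtx, sceneID) // returns false, nil
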
@@ -95,20 +95,6 @@ func (_m *StudioReaderWriter) Destroy(ctx context.Context, id int) error {
	return r0
}

// DestroyImage provides a mock function with given fields: ctx, studioID
func (_m *StudioReaderWriter) DestroyImage(ctx context.Context, studioID int) error {
	ret := _m.Called(ctx, studioID)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
		r0 = rf(ctx, studioID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Find provides a mock function with given fields: ctx, id
func (_m *StudioReaderWriter) Find(ctx context.Context, id int) (*models.Studio, error) {
	ret := _m.Called(ctx, id)

@@ -95,20 +95,6 @@ func (_m *TagReaderWriter) Destroy(ctx context.Context, id int) error {
	return r0
}

// DestroyImage provides a mock function with given fields: ctx, tagID
func (_m *TagReaderWriter) DestroyImage(ctx context.Context, tagID int) error {
	ret := _m.Called(ctx, tagID)

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, int) error); ok {
		r0 = rf(ctx, tagID)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Find provides a mock function with given fields: ctx, id
func (_m *TagReaderWriter) Find(ctx context.Context, id int) (*models.Tag, error) {
	ret := _m.Called(ctx, id)

@@ -22,6 +22,10 @@ type Movie struct {
	URL       sql.NullString  `db:"url" json:"url"`
	CreatedAt SQLiteTimestamp `db:"created_at" json:"created_at"`
	UpdatedAt SQLiteTimestamp `db:"updated_at" json:"updated_at"`

	// TODO - this is only here because of database code in the models package
	FrontImageBlob sql.NullString `db:"front_image_blob" json:"-"`
	BackImageBlob  sql.NullString `db:"back_image_blob" json:"-"`
}

type MoviePartial struct {
@@ -19,6 +19,8 @@ type Studio struct {
	Rating        sql.NullInt64  `db:"rating" json:"rating"`
	Details       sql.NullString `db:"details" json:"details"`
	IgnoreAutoTag bool           `db:"ignore_auto_tag" json:"ignore_auto_tag"`
	// TODO - this is only here because of database code in the models package
	ImageBlob sql.NullString `db:"image_blob" json:"-"`
}

type StudioPartial struct {
@@ -6,12 +6,14 @@ import (
)

type Tag struct {
	ID            int             `db:"id" json:"id"`
	Name          string          `db:"name" json:"name"` // TODO make schema not null
	Description   sql.NullString  `db:"description" json:"description"`
	IgnoreAutoTag bool            `db:"ignore_auto_tag" json:"ignore_auto_tag"`
	CreatedAt     SQLiteTimestamp `db:"created_at" json:"created_at"`
	UpdatedAt     SQLiteTimestamp `db:"updated_at" json:"updated_at"`
	ID            int            `db:"id" json:"id"`
	Name          string         `db:"name" json:"name"` // TODO make schema not null
	Description   sql.NullString `db:"description" json:"description"`
	IgnoreAutoTag bool           `db:"ignore_auto_tag" json:"ignore_auto_tag"`
	// TODO - this is only here because of database code in the models package
	ImageBlob sql.NullString  `db:"image_blob" json:"-"`
	CreatedAt SQLiteTimestamp `db:"created_at" json:"created_at"`
	UpdatedAt SQLiteTimestamp `db:"updated_at" json:"updated_at"`
}

type TagPartial struct {
@@ -50,8 +50,8 @@ type MovieWriter interface {
	Update(ctx context.Context, updatedMovie MoviePartial) (*Movie, error)
	UpdateFull(ctx context.Context, updatedMovie Movie) (*Movie, error)
	Destroy(ctx context.Context, id int) error
	UpdateImages(ctx context.Context, movieID int, frontImage []byte, backImage []byte) error
	DestroyImages(ctx context.Context, movieID int) error
	UpdateFrontImage(ctx context.Context, movieID int, frontImage []byte) error
	UpdateBackImage(ctx context.Context, movieID int, backImage []byte) error
}

type MovieReaderWriter interface {
@@ -11,14 +11,17 @@ type Paths struct {

	Scene        *scenePaths
	SceneMarkers *sceneMarkerPaths
	Blobs        string
}

func NewPaths(generatedPath string) Paths {
func NewPaths(generatedPath string, blobsPath string) Paths {
	p := Paths{}
	p.Generated = newGeneratedPaths(generatedPath)

	p.Scene = newScenePaths(p)
	p.SceneMarkers = newSceneMarkerPaths(p)
	p.Blobs = blobsPath

	return p
}
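
A call-site sketch for the new signature; the config accessor names are assumed, not shown in this hunk:

    // Assumed accessors; illustrative only.
    c := config.GetInstance()
    p := paths.NewPaths(c.GetGeneratedPath(), c.GetBlobsPath())
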
@@ -17,14 +17,10 @@ func newScenePaths(p Paths) *scenePaths {
	return &sp
}

func (sp *scenePaths) GetScreenshotPath(checksum string) string {
func (sp *scenePaths) GetLegacyScreenshotPath(checksum string) string {
	return filepath.Join(sp.Screenshots, checksum+".jpg")
}

func (sp *scenePaths) GetThumbnailScreenshotPath(checksum string) string {
	return filepath.Join(sp.Screenshots, checksum+".thumb.jpg")
}

func (sp *scenePaths) GetTranscodePath(checksum string) string {
	return filepath.Join(sp.Transcodes, checksum+".mp4")
}

@@ -176,6 +176,7 @@ type SceneReader interface {
	All(ctx context.Context) ([]*Scene, error)
	Query(ctx context.Context, options SceneQueryOptions) (*SceneQueryResult, error)
	GetCover(ctx context.Context, sceneID int) ([]byte, error)
	HasCover(ctx context.Context, sceneID int) (bool, error)
}

type SceneWriter interface {
@@ -189,7 +190,6 @@ type SceneWriter interface {
	IncrementWatchCount(ctx context.Context, id int) (int, error)
	Destroy(ctx context.Context, id int) error
	UpdateCover(ctx context.Context, sceneID int, cover []byte) error
	DestroyCover(ctx context.Context, sceneID int) error
}

type SceneReaderWriter interface {

@@ -66,7 +66,6 @@ type StudioWriter interface {
	UpdateFull(ctx context.Context, updatedStudio Studio) (*Studio, error)
	Destroy(ctx context.Context, id int) error
	UpdateImage(ctx context.Context, studioID int, image []byte) error
	DestroyImage(ctx context.Context, studioID int) error
	UpdateStashIDs(ctx context.Context, studioID int, stashIDs []StashID) error
	UpdateAliases(ctx context.Context, studioID int, aliases []string) error
}

@@ -74,7 +74,6 @@ type TagWriter interface {
	UpdateFull(ctx context.Context, updatedTag Tag) (*Tag, error)
	Destroy(ctx context.Context, id int) error
	UpdateImage(ctx context.Context, tagID int, image []byte) error
	DestroyImage(ctx context.Context, tagID int) error
	UpdateAliases(ctx context.Context, tagID int, aliases []string) error
	Merge(ctx context.Context, source []int, destination int) error
	UpdateParentTags(ctx context.Context, tagID int, parentIDs []int) error

@@ -12,10 +12,15 @@ import (
	"github.com/stashapp/stash/pkg/utils"
)

type ImageUpdater interface {
	UpdateFrontImage(ctx context.Context, movieID int, frontImage []byte) error
	UpdateBackImage(ctx context.Context, movieID int, backImage []byte) error
}

type NameFinderCreatorUpdater interface {
	NameFinderCreator
	UpdateFull(ctx context.Context, updatedMovie models.Movie) (*models.Movie, error)
	UpdateImages(ctx context.Context, movieID int, frontImage []byte, backImage []byte) error
	ImageUpdater
}

type Importer struct {
@@ -126,8 +131,14 @@ func (i *Importer) createStudio(ctx context.Context, name string) (int, error) {

func (i *Importer) PostImport(ctx context.Context, id int) error {
	if len(i.frontImageData) > 0 {
		if err := i.ReaderWriter.UpdateImages(ctx, id, i.frontImageData, i.backImageData); err != nil {
			return fmt.Errorf("error setting movie images: %v", err)
		if err := i.ReaderWriter.UpdateFrontImage(ctx, id, i.frontImageData); err != nil {
			return fmt.Errorf("error setting movie front image: %v", err)
		}
	}

	if len(i.backImageData) > 0 {
		if err := i.ReaderWriter.UpdateBackImage(ctx, id, i.backImageData); err != nil {
			return fmt.Errorf("error setting movie back image: %v", err)
		}
	}

@@ -162,8 +162,9 @@ func TestImporterPostImport(t *testing.T) {

	updateMovieImageErr := errors.New("UpdateImages error")

	readerWriter.On("UpdateImages", testCtx, movieID, frontImageBytes, backImageBytes).Return(nil).Once()
	readerWriter.On("UpdateImages", testCtx, errImageID, frontImageBytes, backImageBytes).Return(updateMovieImageErr).Once()
	readerWriter.On("UpdateFrontImage", testCtx, movieID, frontImageBytes).Return(nil).Once()
	readerWriter.On("UpdateBackImage", testCtx, movieID, backImageBytes).Return(nil).Once()
	readerWriter.On("UpdateFrontImage", testCtx, errImageID, frontImageBytes).Return(updateMovieImageErr).Once()

	err := i.PostImport(testCtx, movieID)
	assert.Nil(t, err)

@@ -210,9 +210,8 @@ func (c Cache) ExecutePostHooks(ctx context.Context, id int, hookType HookTrigge
}

func (c Cache) RegisterPostHooks(ctx context.Context, id int, hookType HookTriggerEnum, input interface{}, inputFields []string) {
	txn.AddPostCommitHook(ctx, func(ctx context.Context) error {
	txn.AddPostCommitHook(ctx, func(ctx context.Context) {
		c.ExecutePostHooks(ctx, id, hookType, input, inputFields)
		return nil
	})
}

@@ -7,10 +7,8 @@ import (
	"time"

	"github.com/stashapp/stash/pkg/file"
	"github.com/stashapp/stash/pkg/logger"
	"github.com/stashapp/stash/pkg/models"
	"github.com/stashapp/stash/pkg/plugin"
	"github.com/stashapp/stash/pkg/txn"
)

func (s *Service) Create(ctx context.Context, input *models.Scene, fileIDs []file.ID, coverImage []byte) (*models.Scene, error) {
@@ -55,18 +53,6 @@ func (s *Service) Create(ctx context.Context, input *models.Scene, fileIDs []fil
		if err := s.Repository.UpdateCover(ctx, ret.ID, coverImage); err != nil {
			return nil, fmt.Errorf("setting cover on new scene: %w", err)
		}

		// only update the cover image if provided and everything else was successful
		// only do this if there is a file associated
		if len(fileIDs) > 0 {
			txn.AddPostCommitHook(ctx, func(ctx context.Context) error {
				if err := SetScreenshot(s.Paths, ret.GetHash(s.Config.GetVideoFileNamingAlgorithm()), coverImage); err != nil {
					logger.Errorf("Error setting screenshot: %v", err)
				}

				return nil
			})
		}
	}

	s.PluginCache.RegisterPostHooks(ctx, ret.ID, plugin.SceneCreatePost, nil, nil)

@@ -38,18 +38,6 @@ func (d *FileDeleter) MarkGeneratedFiles(scene *models.Scene) error {

	var files []string

	thumbPath := d.Paths.Scene.GetThumbnailScreenshotPath(sceneHash)
	exists, _ = fsutil.FileExists(thumbPath)
	if exists {
		files = append(files, thumbPath)
	}

	normalPath := d.Paths.Scene.GetScreenshotPath(sceneHash)
	exists, _ = fsutil.FileExists(normalPath)
	if exists {
		files = append(files, normalPath)
	}

	streamPreviewPath := d.Paths.Scene.GetVideoPreviewPath(sceneHash)
	exists, _ = fsutil.FileExists(streamPreviewPath)
	if exists {
@@ -38,9 +38,6 @@ type ScenePaths interface {
	GetVideoPreviewPath(checksum string) string
	GetWebpPreviewPath(checksum string) string

	GetScreenshotPath(checksum string) string
	GetThumbnailScreenshotPath(checksum string) string

	GetSpriteImageFilePath(checksum string) string
	GetSpriteVttFilePath(checksum string) string

@@ -106,6 +103,26 @@ func (g Generator) generateFile(lockCtx *fsutil.LockContext, p Paths, pattern st
	return nil
}

// generateBytes performs a generate operation by generating a temporary file using p and pattern, returns the contents, then deletes it.
func (g Generator) generateBytes(lockCtx *fsutil.LockContext, p Paths, pattern string, generateFn generateFn) ([]byte, error) {
	tmpFile, err := g.tempFile(p, pattern) // tmp output in case the process ends abruptly
	if err != nil {
		return nil, err
	}

	tmpFn := tmpFile.Name()
	defer func() {
		_ = os.Remove(tmpFn)
	}()

	if err := generateFn(lockCtx, tmpFn); err != nil {
		return nil, err
	}

	return os.ReadFile(tmpFn)
}

// generate runs ffmpeg with the given args and waits for it to finish.
// Returns an error if the command fails. If the command fails, the return
// value will be of type *exec.ExitError.

@@ -9,8 +9,8 @@ import (
)

const (
	thumbnailWidth = 320
	thumbnailQuality = 5
	// thumbnailWidth = 320
	// thumbnailQuality = 5

	screenshotQuality = 2

@@ -21,17 +21,10 @@ type ScreenshotOptions struct {
	At *float64
}

func (g Generator) Screenshot(ctx context.Context, input string, hash string, videoWidth int, videoDuration float64, options ScreenshotOptions) error {
func (g Generator) Screenshot(ctx context.Context, input string, videoWidth int, videoDuration float64, options ScreenshotOptions) ([]byte, error) {
	lockCtx := g.LockManager.ReadLock(ctx, input)
	defer lockCtx.Cancel()

	output := g.ScenePaths.GetScreenshotPath(hash)
	if !g.Overwrite {
		if exists, _ := fsutil.FileExists(output); exists {
			return nil
		}
	}

	logger.Infof("Creating screenshot for %s", input)

	at := screenshotDurationProportion * videoDuration
@@ -39,46 +32,16 @@ func (g Generator) Screenshot(ctx context.Context, input string, hash string, vi
		at = *options.At
	}

	if err := g.generateFile(lockCtx, g.ScenePaths, jpgPattern, output, g.screenshot(input, screenshotOptions{
	ret, err := g.generateBytes(lockCtx, g.ScenePaths, jpgPattern, g.screenshot(input, screenshotOptions{
		Time:    at,
		Quality: screenshotQuality,
		// default Width is video width
	})); err != nil {
		return err
	}))
	if err != nil {
		return nil, err
	}

	logger.Debug("created screenshot: ", output)

	return nil
}

func (g Generator) Thumbnail(ctx context.Context, input string, hash string, videoDuration float64, options ScreenshotOptions) error {
	lockCtx := g.LockManager.ReadLock(ctx, input)
	defer lockCtx.Cancel()

	output := g.ScenePaths.GetThumbnailScreenshotPath(hash)
	if !g.Overwrite {
		if exists, _ := fsutil.FileExists(output); exists {
			return nil
		}
	}

	at := screenshotDurationProportion * videoDuration
	if options.At != nil {
		at = *options.At
	}

	if err := g.generateFile(lockCtx, g.ScenePaths, jpgPattern, output, g.screenshot(input, screenshotOptions{
		Time:    at,
		Quality: thumbnailQuality,
		Width:   thumbnailWidth,
	})); err != nil {
		return err
	}

	logger.Debug("created thumbnail: ", output)

	return nil
	return ret, nil
}

type screenshotOptions struct {
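
With Screenshot now returning the encoded JPEG instead of writing under the generated path, the caller is expected to persist the bytes itself. A hypothetical caller; the variable names around the call are assumptions:

    // Hypothetical: persist the returned bytes as the scene cover.
    data, err := g.Screenshot(ctx, videoFile.Path, videoFile.Width, videoFile.Duration, ScreenshotOptions{})
    if err != nil {
        return err
    }
    return repo.UpdateCover(ctx, sceneID, data)
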
@@ -123,7 +123,7 @@ func (s *Service) mergeSceneMarkers(ctx context.Context, dest *models.Scene, src
	}

	if len(toRename) > 0 {
		txn.AddPostCommitHook(ctx, func(ctx context.Context) error {
		txn.AddPostCommitHook(ctx, func(ctx context.Context) {
			// rename the files if they exist
			for _, e := range toRename {
				srcExists, _ := fsutil.FileExists(e.src)
@@ -135,8 +135,6 @@ func (s *Service) mergeSceneMarkers(ctx context.Context, dest *models.Scene, src
					}
				}
			}

			return nil
		})
	}

@@ -16,14 +16,6 @@ func MigrateHash(p *paths.Paths, oldHash string, newHash string) {
	migrateSceneFiles(oldPath, newPath)

	scenePaths := p.Scene
	oldPath = scenePaths.GetThumbnailScreenshotPath(oldHash)
	newPath = scenePaths.GetThumbnailScreenshotPath(newHash)
	migrateSceneFiles(oldPath, newPath)

	oldPath = scenePaths.GetScreenshotPath(oldHash)
	newPath = scenePaths.GetScreenshotPath(newHash)
	migrateSceneFiles(oldPath, newPath)

	oldPath = scenePaths.GetVideoPreviewPath(oldHash)
	newPath = scenePaths.GetVideoPreviewPath(newHash)
	migrateSceneFiles(oldPath, newPath)

143 pkg/scene/migrate_screenshots.go Normal file
@@ -0,0 +1,143 @@
package scene

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/stashapp/stash/pkg/logger"
	"github.com/stashapp/stash/pkg/models"
	"github.com/stashapp/stash/pkg/txn"
)

type MigrateSceneScreenshotsInput struct {
	DeleteFiles       bool `json:"deleteFiles"`
	OverwriteExisting bool `json:"overwriteExisting"`
}

type HashFinderCoverUpdater interface {
	FindByChecksum(ctx context.Context, checksum string) ([]*models.Scene, error)
	FindByOSHash(ctx context.Context, oshash string) ([]*models.Scene, error)
	CoverUpdater
}

type ScreenshotMigrator struct {
	Options      MigrateSceneScreenshotsInput
	SceneUpdater HashFinderCoverUpdater
	TxnManager   txn.Manager
}

func (m *ScreenshotMigrator) MigrateScreenshots(ctx context.Context, screenshotPath string) error {
	// find the scene based on the screenshot path
	s, err := m.findScenes(ctx, screenshotPath)
	if err != nil {
		return fmt.Errorf("finding scenes for screenshot: %w", err)
	}

	for _, scene := range s {
		// migrate each scene in its own transaction
		if err := txn.WithTxn(ctx, m.TxnManager, func(ctx context.Context) error {
			return m.migrateSceneScreenshot(ctx, scene, screenshotPath)
		}); err != nil {
			return fmt.Errorf("migrating screenshot for scene %s: %w", scene.DisplayName(), err)
		}
	}

	// if deleteFiles is true, delete the file
	if m.Options.DeleteFiles {
		if err := os.Remove(screenshotPath); err != nil {
			// log and continue
			logger.Errorf("Error deleting screenshot file %s: %v", screenshotPath, err)
		} else {
			logger.Debugf("Deleted screenshot file %s", screenshotPath)
		}

		// also delete the thumb file
		thumbPath := strings.TrimSuffix(screenshotPath, ".jpg") + ".thumb.jpg"
		// ignore errors for thumb files
		if err := os.Remove(thumbPath); err == nil {
			logger.Debugf("Deleted thumb file %s", thumbPath)
		}
	}

	return nil
}

func (m *ScreenshotMigrator) findScenes(ctx context.Context, screenshotPath string) ([]*models.Scene, error) {
	basename := filepath.Base(screenshotPath)
	ext := filepath.Ext(basename)
	basename = basename[:len(basename)-len(ext)]

	// use the basename to determine the hash type
	algo := m.getHashType(basename)

	if algo == "" {
		return nil, fmt.Errorf("could not determine hash type")
	}

	// use the hash type to get the scene
	var ret []*models.Scene
	err := txn.WithReadTxn(ctx, m.TxnManager, func(ctx context.Context) error {
		var err error

		if algo == models.HashAlgorithmOshash {
			// use oshash
			ret, err = m.SceneUpdater.FindByOSHash(ctx, basename)
		} else {
			// use md5
			ret, err = m.SceneUpdater.FindByChecksum(ctx, basename)
		}

		return err
	})

	return ret, err
}

func (m *ScreenshotMigrator) getHashType(basename string) models.HashAlgorithm {
	// if the basename is 16 characters long, it must be an oshash
	if len(basename) == 16 {
		return models.HashAlgorithmOshash
	}

	// if it's 32 characters long, it must be an md5
	if len(basename) == 32 {
		return models.HashAlgorithmMd5
	}

	// otherwise, it's undefined
	return ""
}

func (m *ScreenshotMigrator) migrateSceneScreenshot(ctx context.Context, scene *models.Scene, screenshotPath string) error {
	if !m.Options.OverwriteExisting {
		// check if the scene has a cover already
		hasCover, err := m.SceneUpdater.HasCover(ctx, scene.ID)
		if err != nil {
			return fmt.Errorf("checking for existing cover: %w", err)
		}

		if hasCover {
			// already has a cover - just silently return
			logger.Debugf("Scene %s already has a screenshot, skipping", scene.DisplayName())
			return nil
		}
	}

	// get the data from the file
	data, err := os.ReadFile(screenshotPath)
	if err != nil {
		return fmt.Errorf("reading screenshot file: %w", err)
	}

	if err := m.SceneUpdater.UpdateCover(ctx, scene.ID, data); err != nil {
		return fmt.Errorf("updating scene screenshot: %w", err)
	}

	logger.Infof("Updated screenshot for scene %s from %s", scene.DisplayName(), filepath.Base(screenshotPath))

	return nil
}
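
A wiring sketch for the migrator; the struct fields come from this file, but the repository names and the job that walks the legacy screenshots directory are assumptions:

    // Assumed wiring; the directory-walking job lives elsewhere.
    m := &scene.ScreenshotMigrator{
        Options:      scene.MigrateSceneScreenshotsInput{DeleteFiles: true},
        SceneUpdater: repo.Scene,
        TxnManager:   repo.TxnManager,
    }
    if err := m.MigrateScreenshots(ctx, screenshotPath); err != nil {
        logger.Errorf("Error migrating screenshot %s: %v", screenshotPath, err)
    }
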
@@ -35,7 +35,6 @@ type ScanGenerator interface {
type ScanHandler struct {
	CreatorUpdater CreatorUpdater

	CoverGenerator CoverGenerator
	ScanGenerator  ScanGenerator
	CaptionUpdater video.CaptionUpdater
	PluginCache    *plugin.Cache
@@ -48,9 +47,6 @@ func (h *ScanHandler) validate() error {
	if h.CreatorUpdater == nil {
		return errors.New("CreatorUpdater is required")
	}
	if h.CoverGenerator == nil {
		return errors.New("CoverGenerator is required")
	}
	if h.ScanGenerator == nil {
		return errors.New("ScanGenerator is required")
	}
@@ -132,20 +128,13 @@ func (h *ScanHandler) Handle(ctx context.Context, f file.File, oldFile file.File
	}

	// do this after the commit so that cover generation doesn't hold up the transaction
	txn.AddPostCommitHook(ctx, func(ctx context.Context) error {
	txn.AddPostCommitHook(ctx, func(ctx context.Context) {
		for _, s := range existing {
			if err := h.CoverGenerator.GenerateCover(ctx, s, videoFile); err != nil {
				// just log if cover generation fails. We can try again on rescan
				logger.Errorf("Error generating cover for %s: %v", videoFile.Path, err)
			}

			if err := h.ScanGenerator.Generate(ctx, s, videoFile); err != nil {
				// just log if generation fails. We can try again on rescan
				logger.Errorf("Error generating content for %s: %v", videoFile.Path, err)
			}
		}

		return nil
	})

	return nil

@@ -1,103 +0,0 @@
package scene

import (
	"bytes"
	"context"
	"image"
	"image/jpeg"
	"os"

	"github.com/stashapp/stash/pkg/file"
	"github.com/stashapp/stash/pkg/fsutil"
	"github.com/stashapp/stash/pkg/models"
	"github.com/stashapp/stash/pkg/models/paths"

	"github.com/disintegration/imaging"

	// needed to decode other image formats
	_ "image/gif"
	_ "image/png"
)

type CoverGenerator interface {
	GenerateCover(ctx context.Context, scene *models.Scene, f *file.VideoFile) error
}

type ScreenshotSetter interface {
	SetScreenshot(scene *models.Scene, imageData []byte) error
}

type PathsCoverSetter struct {
	Paths               *paths.Paths
	FileNamingAlgorithm models.HashAlgorithm
}

func (ss *PathsCoverSetter) SetScreenshot(scene *models.Scene, imageData []byte) error {
	// don't set where scene has no file
	if scene.Path == "" {
		return nil
	}
	checksum := scene.GetHash(ss.FileNamingAlgorithm)
	return SetScreenshot(ss.Paths, checksum, imageData)
}

func writeImage(path string, imageData []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = f.Write(imageData)
	return err
}

func writeThumbnail(path string, thumbnail image.Image) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	return jpeg.Encode(f, thumbnail, nil)
}

func SetScreenshot(paths *paths.Paths, checksum string, imageData []byte) error {
	thumbPath := paths.Scene.GetThumbnailScreenshotPath(checksum)
	normalPath := paths.Scene.GetScreenshotPath(checksum)

	img, _, err := image.Decode(bytes.NewReader(imageData))
	if err != nil {
		return err
	}

	// resize to 320 width maintaining aspect ratio, for the thumbnail
	const width = 320
	origWidth := img.Bounds().Max.X
	origHeight := img.Bounds().Max.Y
	height := width / origWidth * origHeight

	thumbnail := imaging.Resize(img, width, height, imaging.Lanczos)
	err = writeThumbnail(thumbPath, thumbnail)
	if err != nil {
		return err
	}

	err = writeImage(normalPath, imageData)

	return err
}

func (s *Service) GetCover(ctx context.Context, scene *models.Scene) ([]byte, error) {
	if scene.Path != "" {
		filepath := s.Paths.Scene.GetScreenshotPath(scene.GetHash(s.Config.GetVideoFileNamingAlgorithm()))

		// fall back to the scene image blob if the file isn't present
		screenshotExists, _ := fsutil.FileExists(filepath)
		if screenshotExists {
			return os.ReadFile(filepath)
		}
	}

	return s.Repository.GetCover(ctx, scene.ID)
}
@@ -22,6 +22,7 @@ type Creator interface {
}

type CoverUpdater interface {
	HasCover(ctx context.Context, sceneID int) (bool, error)
	UpdateCover(ctx context.Context, sceneID int, cover []byte) error
}

@@ -46,7 +46,7 @@ func (u *UpdateSet) IsEmpty() bool {
// Update updates a scene by updating the fields in the Partial field, then
// updates non-nil relationships. Returns an error if there is no work to
// be done.
func (u *UpdateSet) Update(ctx context.Context, qb Updater, screenshotSetter ScreenshotSetter) (*models.Scene, error) {
func (u *UpdateSet) Update(ctx context.Context, qb Updater) (*models.Scene, error) {
	if u.IsEmpty() {
		return nil, ErrEmptyUpdater
	}
@@ -64,10 +64,6 @@ func (u *UpdateSet) Update(ctx context.Context, qb Updater, screenshotSetter Scr
		if err := qb.UpdateCover(ctx, u.ID, u.CoverImage); err != nil {
			return nil, fmt.Errorf("error updating scene cover: %w", err)
		}

		if err := screenshotSetter.SetScreenshot(ret, u.CoverImage); err != nil {
			return nil, fmt.Errorf("error setting scene screenshot: %w", err)
		}
	}

	return ret, nil
@@ -93,12 +93,6 @@ func TestUpdater_IsEmpty(t *testing.T) {
	}
}

type mockScreenshotSetter struct{}

func (s *mockScreenshotSetter) SetScreenshot(scene *models.Scene, imageData []byte) error {
	return nil
}

func TestUpdater_Update(t *testing.T) {
	const (
		sceneID = iota + 1
@@ -210,7 +204,7 @@ func TestUpdater_Update(t *testing.T) {
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.u.Update(ctx, &qb, &mockScreenshotSetter{})
			got, err := tt.u.Update(ctx, &qb)
			if (err != nil) != tt.wantErr {
				t.Errorf("Updater.Update() error = %v, wantErr %v", err, tt.wantErr)
				return

@@ -932,7 +932,7 @@ func (c Client) SubmitSceneDraft(ctx context.Context, scene *models.Scene, endpo
	}
	draft.Tags = tags

	if cover != nil {
	if len(cover) > 0 {
		image = bytes.NewReader(cover)
	}

@@ -70,6 +70,11 @@ func (db *Anonymiser) Anonymise(ctx context.Context) error {
	return nil
}

func (db *Anonymiser) truncateColumn(tableName string, column string) error {
	_, err := db.db.Exec("UPDATE " + tableName + " SET " + column + " = NULL")
	return err
}

func (db *Anonymiser) truncateTable(tableName string) error {
	_, err := db.db.Exec("DELETE FROM " + tableName)
	return err
@@ -77,11 +82,14 @@ func (db *Anonymiser) truncateTable(tableName string) error {

func (db *Anonymiser) deleteBlobs() error {
	return utils.Do([]func() error{
		func() error { return db.truncateTable("scenes_cover") },
		func() error { return db.truncateTable("movies_images") },
		func() error { return db.truncateTable("performers_image") },
		func() error { return db.truncateTable("studios_image") },
		func() error { return db.truncateTable("tags_image") },
		func() error { return db.truncateColumn("tags", "image_blob") },
		func() error { return db.truncateColumn("studios", "image_blob") },
		func() error { return db.truncateColumn("performers", "image_blob") },
		func() error { return db.truncateColumn("scenes", "cover_blob") },
		func() error { return db.truncateColumn("movies", "front_image_blob") },
		func() error { return db.truncateColumn("movies", "back_image_blob") },

		func() error { return db.truncateTable("blobs") },
	})
}

382 pkg/sqlite/blob.go Normal file
@@ -0,0 +1,382 @@
package sqlite

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"io/fs"

	"github.com/doug-martin/goqu/v9"
	"github.com/doug-martin/goqu/v9/exp"
	"github.com/jmoiron/sqlx"
	"github.com/mattn/go-sqlite3"
	"github.com/stashapp/stash/pkg/file"
	"github.com/stashapp/stash/pkg/hash/md5"
	"github.com/stashapp/stash/pkg/sqlite/blob"
	"github.com/stashapp/stash/pkg/utils"
	"gopkg.in/guregu/null.v4"
)

const (
	blobTable          = "blobs"
	blobChecksumColumn = "checksum"
)

type BlobStoreOptions struct {
	// UseFilesystem should be true if blob data should be stored in the filesystem
	UseFilesystem bool
	// UseDatabase should be true if blob data should be stored in the database
	UseDatabase bool
	// Path is the filesystem path to use for storing blobs
	Path string
}

type BlobStore struct {
	repository

	tableMgr *table

	fsStore *blob.FilesystemStore
	options BlobStoreOptions
}

func NewBlobStore(options BlobStoreOptions) *BlobStore {
	return &BlobStore{
		repository: repository{
			tableName: blobTable,
			idColumn:  blobChecksumColumn,
		},

		tableMgr: blobTableMgr,

		fsStore: blob.NewFilesystemStore(options.Path, &file.OsFS{}),
		options: options,
	}
}

type blobRow struct {
	Checksum string `db:"checksum"`
	Blob     []byte `db:"blob"`
}

func (qb *BlobStore) table() exp.IdentifierExpression {
	return qb.tableMgr.table
}

func (qb *BlobStore) Count(ctx context.Context) (int, error) {
	table := qb.table()
	q := dialect.From(table).Select(goqu.COUNT(table.Col(blobChecksumColumn)))

	var ret int
	if err := querySimple(ctx, q, &ret); err != nil {
		return 0, err
	}

	return ret, nil
}

// Write stores the data and its checksum in enabled stores.
// Always writes at least the checksum to the database.
func (qb *BlobStore) Write(ctx context.Context, data []byte) (string, error) {
	if !qb.options.UseDatabase && !qb.options.UseFilesystem {
		panic("no blob store configured")
	}

	if len(data) == 0 {
		return "", fmt.Errorf("cannot write empty data")
	}

	checksum := md5.FromBytes(data)

	// only write blob to the database if UseDatabase is true
	// always at least write the checksum
	var storedData []byte
	if qb.options.UseDatabase {
		storedData = data
	}

	if err := qb.write(ctx, checksum, storedData); err != nil {
		return "", fmt.Errorf("writing to database: %w", err)
	}

	if qb.options.UseFilesystem {
		if err := qb.fsStore.Write(ctx, checksum, data); err != nil {
			return "", fmt.Errorf("writing to filesystem: %w", err)
		}
	}

	return checksum, nil
}

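A usage sketch for the store: Write returns the MD5 checksum that the owning row records, and Read later resolves the bytes from whichever backend holds them. Illustrative only; both calls assume a transaction-carrying context:

    // Illustrative only.
    checksum, err := blobStore.Write(ctx, imageData)
    if err != nil {
        return err
    }
    // checksum is what gets stored in e.g. scenes.cover_blob

    data, err := blobStore.Read(ctx, checksum)
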
func (qb *BlobStore) write(ctx context.Context, checksum string, data []byte) error {
	table := qb.table()
	q := dialect.Insert(table).Prepared(true).Rows(blobRow{
		Checksum: checksum,
		Blob:     data,
	}).OnConflict(goqu.DoNothing())

	_, err := exec(ctx, q)
	if err != nil {
		return fmt.Errorf("inserting into %s: %w", table, err)
	}

	return nil
}

func (qb *BlobStore) update(ctx context.Context, checksum string, data []byte) error {
	table := qb.table()
	q := dialect.Update(table).Prepared(true).Set(goqu.Record{
		"blob": data,
	}).Where(goqu.C(blobChecksumColumn).Eq(checksum))

	_, err := exec(ctx, q)
	if err != nil {
		return fmt.Errorf("updating %s: %w", table, err)
	}

	return nil
}

type ChecksumNotFoundError struct {
	Checksum string
}

func (e *ChecksumNotFoundError) Error() string {
	return fmt.Sprintf("checksum %s does not exist", e.Checksum)
}

type ChecksumBlobNotExistError struct {
	Checksum string
}

func (e *ChecksumBlobNotExistError) Error() string {
	return fmt.Sprintf("blob for checksum %s does not exist", e.Checksum)
}

func (qb *BlobStore) readSQL(ctx context.Context, querySQL string, args ...interface{}) ([]byte, string, error) {
	if !qb.options.UseDatabase && !qb.options.UseFilesystem {
		panic("no blob store configured")
	}

	// always try to get from the database first, even if set to use filesystem
	var row blobRow
	found := false
	const single = true
	if err := qb.queryFunc(ctx, querySQL, args, single, func(r *sqlx.Rows) error {
		found = true
		if err := r.StructScan(&row); err != nil {
			return err
		}

		return nil
	}); err != nil {
		return nil, "", fmt.Errorf("reading from database: %w", err)
	}

	if !found {
		// not found in the database - does not exist
		return nil, "", nil
	}

	checksum := row.Checksum

	if row.Blob != nil {
		return row.Blob, checksum, nil
	}

	// don't use the filesystem if not configured to do so
	if qb.options.UseFilesystem {
		ret, err := qb.fsStore.Read(ctx, checksum)
		if err == nil {
			return ret, checksum, nil
		}

		if !errors.Is(err, fs.ErrNotExist) {
			return nil, checksum, fmt.Errorf("reading from filesystem: %w", err)
		}
	}

	return nil, checksum, &ChecksumBlobNotExistError{
		Checksum: checksum,
	}
}

// Read reads the data from the database or filesystem, depending on which is enabled.
func (qb *BlobStore) Read(ctx context.Context, checksum string) ([]byte, error) {
	if !qb.options.UseDatabase && !qb.options.UseFilesystem {
		panic("no blob store configured")
	}

	// always try to get from the database first, even if set to use filesystem
	ret, err := qb.readFromDatabase(ctx, checksum)
	if err != nil {
		if !errors.Is(err, sql.ErrNoRows) {
			return nil, fmt.Errorf("reading from database: %w", err)
		}

		// not found in the database - does not exist
		return nil, &ChecksumNotFoundError{
			Checksum: checksum,
		}
	}

	if ret != nil {
		return ret, nil
	}

	// don't use the filesystem if not configured to do so
	if qb.options.UseFilesystem {
		ret, err := qb.fsStore.Read(ctx, checksum)
		if err == nil {
			return ret, nil
		}

		if !errors.Is(err, fs.ErrNotExist) {
			return nil, fmt.Errorf("reading from filesystem: %w", err)
		}
	}

	// blob not found - should not happen
	return nil, &ChecksumBlobNotExistError{
		Checksum: checksum,
	}
}

func (qb *BlobStore) readFromDatabase(ctx context.Context, checksum string) ([]byte, error) {
	q := dialect.From(qb.table()).Select(qb.table().All()).Where(qb.tableMgr.byID(checksum))

	var row blobRow
	const single = true
	if err := queryFunc(ctx, q, single, func(r *sqlx.Rows) error {
		if err := r.StructScan(&row); err != nil {
			return err
		}

		return nil
	}); err != nil {
		return nil, fmt.Errorf("querying %s: %w", qb.table(), err)
	}

	return row.Blob, nil
}

// Delete marks a checksum as no longer in use by a single reference.
// If no references remain, the blob is deleted from the database and filesystem.
func (qb *BlobStore) Delete(ctx context.Context, checksum string) error {
	// try to delete the blob from the database
	if err := qb.delete(ctx, checksum); err != nil {
		if qb.isConstraintError(err) {
			// blob is still referenced - do not delete
			return nil
		}

		// unexpected error
		return fmt.Errorf("deleting from database: %w", err)
	}

	// blob was deleted from the database - delete from filesystem if enabled
	if qb.options.UseFilesystem {
		if err := qb.fsStore.Delete(ctx, checksum); err != nil {
			return fmt.Errorf("deleting from filesystem: %w", err)
		}
	}

	return nil
}

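Note that reference counting is delegated to the schema: each *_blob column added in the migration below declares REFERENCES blobs(checksum), so the DELETE fails with a constraint error while any row still points at the blob, and Delete treats that as a successful no-op. A sketch of the effective behaviour:

    // Sketch: deleting a checksum still referenced by some row is a silent no-op.
    if err := blobStore.Delete(ctx, checksum); err != nil {
        return err // a real SQL or filesystem failure, not "still in use"
    }
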
func (qb *BlobStore) isConstraintError(err error) bool {
	var sqliteError sqlite3.Error
	if errors.As(err, &sqliteError) {
		return sqliteError.Code == sqlite3.ErrConstraint
	}
	return false
}

func (qb *BlobStore) delete(ctx context.Context, checksum string) error {
	table := qb.table()

	q := dialect.Delete(table).Where(goqu.C(blobChecksumColumn).Eq(checksum))

	_, err := exec(ctx, q)
	if err != nil {
		return fmt.Errorf("deleting from %s: %w", table, err)
	}

	return nil
}

type blobJoinQueryBuilder struct {
	repository
	blobStore *BlobStore

	joinTable string
}

func (qb *blobJoinQueryBuilder) GetImage(ctx context.Context, id int, blobCol string) ([]byte, error) {
	sqlQuery := utils.StrFormat(`
		SELECT blobs.checksum, blobs.blob FROM {joinTable} INNER JOIN blobs ON {joinTable}.{joinCol} = blobs.checksum
		WHERE {joinTable}.id = ?
	`, utils.StrFormatMap{
		"joinTable": qb.joinTable,
		"joinCol":   blobCol,
	})

	ret, _, err := qb.blobStore.readSQL(ctx, sqlQuery, id)
	return ret, err
}

func (qb *blobJoinQueryBuilder) UpdateImage(ctx context.Context, id int, blobCol string, image []byte) error {
	if len(image) == 0 {
		return qb.DestroyImage(ctx, id, blobCol)
	}
	checksum, err := qb.blobStore.Write(ctx, image)
	if err != nil {
		return err
	}

	sqlQuery := fmt.Sprintf("UPDATE %s SET %s = ? WHERE id = ?", qb.joinTable, blobCol)
	_, err = qb.tx.Exec(ctx, sqlQuery, checksum, id)
	return err
}

func (qb *blobJoinQueryBuilder) DestroyImage(ctx context.Context, id int, blobCol string) error {
	sqlQuery := utils.StrFormat(`
		SELECT {joinTable}.{joinCol} FROM {joinTable} WHERE {joinTable}.id = ?
	`, utils.StrFormatMap{
		"joinTable": qb.joinTable,
		"joinCol":   blobCol,
	})

	var checksum null.String
	err := qb.repository.querySimple(ctx, sqlQuery, []interface{}{id}, &checksum)
	if err != nil {
		return err
	}

	if !checksum.Valid {
		// no image to delete
		return nil
	}

	updateQuery := fmt.Sprintf("UPDATE %s SET %s = NULL WHERE id = ?", qb.joinTable, blobCol)
	if _, err = qb.tx.Exec(ctx, updateQuery, id); err != nil {
		return err
	}

	return qb.blobStore.Delete(ctx, checksum.String)
}

func (qb *blobJoinQueryBuilder) HasImage(ctx context.Context, id int, blobCol string) (bool, error) {
	stmt := utils.StrFormat("SELECT COUNT(*) as count FROM (SELECT {joinCol} FROM {joinTable} WHERE id = ? AND {joinCol} IS NOT NULL LIMIT 1)", utils.StrFormatMap{
		"joinTable": qb.joinTable,
		"joinCol":   blobCol,
	})

	c, err := qb.runCountQuery(ctx, stmt, []interface{}{id})
	if err != nil {
		return false, err
	}

	return c == 1, nil
}
108 pkg/sqlite/blob/fs.go Normal file
@@ -0,0 +1,108 @@
package blob

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"

	"github.com/stashapp/stash/pkg/file"
	"github.com/stashapp/stash/pkg/fsutil"
)

const (
	blobsDirDepth  int = 2
	blobsDirLength int = 2 // blobsDirDepth * blobsDirLength must be smaller than the length of checksum
)

type FS interface {
	Create(name string) (*os.File, error)
	MkdirAll(path string, perm fs.FileMode) error
	Open(name string) (fs.ReadDirFile, error)
	Remove(name string) error

	file.RenamerRemover
}

type FilesystemStore struct {
	deleter *file.Deleter
	path    string
	fs      FS
}

func NewFilesystemStore(path string, fs FS) *FilesystemStore {
	deleter := &file.Deleter{
		RenamerRemover: fs,
	}

	return &FilesystemStore{
		deleter: deleter,
		path:    path,
		fs:      fs,
	}
}

func (s *FilesystemStore) checksumToPath(checksum string) string {
	return filepath.Join(s.path, fsutil.GetIntraDir(checksum, blobsDirDepth, blobsDirLength), checksum)
}

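With blobsDirDepth and blobsDirLength both 2, paths shard on the first four characters of the checksum, assuming GetIntraDir builds consecutive prefix directories as it does for generated files:

    // Assumed layout: checksum "0b1af5..." maps to <path>/0b/1a/0b1af5...
    fn := s.checksumToPath("0b1af59cbbe7d44388a45e43744d9204")
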
func (s *FilesystemStore) Write(ctx context.Context, checksum string, data []byte) error {
	if s.path == "" {
		return fmt.Errorf("no path set")
	}

	fn := s.checksumToPath(checksum)

	// create the directory if it doesn't exist
	if err := s.fs.MkdirAll(filepath.Dir(fn), 0755); err != nil {
		return fmt.Errorf("creating directory %q: %w", filepath.Dir(fn), err)
	}

	out, err := s.fs.Create(fn)
	if err != nil {
		return fmt.Errorf("creating file %q: %w", fn, err)
	}
	defer out.Close()

	r := bytes.NewReader(data)

	if _, err = io.Copy(out, r); err != nil {
		return fmt.Errorf("writing file %q: %w", fn, err)
	}

	return nil
}

func (s *FilesystemStore) Read(ctx context.Context, checksum string) ([]byte, error) {
	if s.path == "" {
		return nil, fmt.Errorf("no path set")
	}

	fn := s.checksumToPath(checksum)
	f, err := s.fs.Open(fn)
	if err != nil {
		return nil, fmt.Errorf("opening file %q: %w", fn, err)
	}

	defer f.Close()

	return io.ReadAll(f)
}

func (s *FilesystemStore) Delete(ctx context.Context, checksum string) error {
	if s.path == "" {
		return fmt.Errorf("no path set")
	}

	s.deleter.RegisterHooks(ctx)

	fn := s.checksumToPath(checksum)

	if err := s.deleter.Files([]string{fn}); err != nil {
		return fmt.Errorf("deleting file %q: %w", fn, err)
	}

	return nil
}
116 pkg/sqlite/blob_migrate.go Normal file
@@ -0,0 +1,116 @@
package sqlite

import (
	"context"
	"fmt"

	"github.com/jmoiron/sqlx"
)

func (qb *BlobStore) FindBlobs(ctx context.Context, n uint, lastChecksum string) ([]string, error) {
	table := qb.table()
	q := dialect.From(table).Select(table.Col(blobChecksumColumn)).Order(table.Col(blobChecksumColumn).Asc()).Limit(n)

	if lastChecksum != "" {
		q = q.Where(table.Col(blobChecksumColumn).Gt(lastChecksum))
	}

	const single = false
	var checksums []string
	if err := queryFunc(ctx, q, single, func(rows *sqlx.Rows) error {
		var checksum string
		if err := rows.Scan(&checksum); err != nil {
			return err
		}
		checksums = append(checksums, checksum)
		return nil
	}); err != nil {
		return nil, err
	}

	return checksums, nil
}

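FindBlobs is a keyset-pagination helper; a migration job can batch over every checksum like this (sketch using only the functions in this file; deleteOld comes from the caller):

    // Sketch: page through all checksums in ascending order.
    last := ""
    for {
        batch, err := blobStore.FindBlobs(ctx, 1000, last)
        if err != nil {
            return err
        }
        if len(batch) == 0 {
            break
        }

        for _, checksum := range batch {
            if err := blobStore.MigrateBlob(ctx, checksum, deleteOld); err != nil {
                return err
            }
        }

        last = batch[len(batch)-1]
    }
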
// MigrateBlob migrates a blob from the filesystem to the database, or vice versa.
// The target is determined by the UseDatabase and UseFilesystem options.
// If deleteOld is true, the blob is deleted from the source after migration.
func (qb *BlobStore) MigrateBlob(ctx context.Context, checksum string, deleteOld bool) error {
	if !qb.options.UseDatabase && !qb.options.UseFilesystem {
		panic("no blob store configured")
	}

	if qb.options.UseDatabase && qb.options.UseFilesystem {
		panic("both filesystem and database configured")
	}

	if qb.options.Path == "" {
		panic("no blob path configured")
	}

	if qb.options.UseDatabase {
		return qb.migrateBlobDatabase(ctx, checksum, deleteOld)
	}

	return qb.migrateBlobFilesystem(ctx, checksum, deleteOld)
}

// migrateBlobDatabase migrates a blob from the filesystem to the database
func (qb *BlobStore) migrateBlobDatabase(ctx context.Context, checksum string, deleteOld bool) error {
	// ignore if the blob is already present in the database
	// (still delete the old data if requested)
	existing, err := qb.readFromDatabase(ctx, checksum)
	if err != nil {
		return fmt.Errorf("reading from database: %w", err)
	}

	if len(existing) == 0 {
		// find the blob in the filesystem
		blob, err := qb.fsStore.Read(ctx, checksum)
		if err != nil {
			return fmt.Errorf("reading from filesystem: %w", err)
		}

		// write the blob to the database
		if err := qb.update(ctx, checksum, blob); err != nil {
			return fmt.Errorf("writing to database: %w", err)
		}
	}

	if deleteOld {
		// delete the blob from the filesystem after commit
		if err := qb.fsStore.Delete(ctx, checksum); err != nil {
			return fmt.Errorf("deleting from filesystem: %w", err)
		}
	}

	return nil
}

// migrateBlobFilesystem migrates a blob from the database to the filesystem
func (qb *BlobStore) migrateBlobFilesystem(ctx context.Context, checksum string, deleteOld bool) error {
	// find the blob in the database
	blob, err := qb.readFromDatabase(ctx, checksum)
	if err != nil {
		return fmt.Errorf("reading from database: %w", err)
	}

	if len(blob) == 0 {
		// it's possible that the blob is already present in the filesystem
		// just ignore
		return nil
	}

	// write the blob to the filesystem
	if err := qb.fsStore.Write(ctx, checksum, blob); err != nil {
		return fmt.Errorf("writing to filesystem: %w", err)
	}

	if deleteOld {
		// delete the blob from the database row
		if err := qb.update(ctx, checksum, nil); err != nil {
			return err
		}
	}

	return nil
}
45 pkg/sqlite/blob_test.go Normal file
@@ -0,0 +1,45 @@
//go:build integration
// +build integration

package sqlite_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

type updateImageFunc func(ctx context.Context, id int, image []byte) error
type getImageFunc func(ctx context.Context, id int) ([]byte, error)

func testUpdateImage(t *testing.T, ctx context.Context, id int, updateFn updateImageFunc, getFn getImageFunc) error {
	image := []byte("image")
	err := updateFn(ctx, id, image)
	if err != nil {
		return fmt.Errorf("error updating image: %w", err)
	}

	// ensure image set
	storedImage, err := getFn(ctx, id)
	if err != nil {
		return fmt.Errorf("error getting image: %w", err)
	}
	assert.Equal(t, storedImage, image)

	// set nil image
	err = updateFn(ctx, id, nil)
	if err != nil {
		return fmt.Errorf("error setting nil image: %w", err)
	}

	// ensure image null
	storedImage, err = getFn(ctx, id)
	if err != nil {
		return fmt.Errorf("error getting image: %w", err)
	}
	assert.Nil(t, storedImage)

	return nil
}
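The helper takes function values so each store's test can reuse it; a hypothetical call from a tag store test (the method expressions are assumptions based on the interfaces above):

    // Hypothetical usage from a store-specific integration test.
    if err := testUpdateImage(t, ctx, tagID, db.Tag.UpdateImage, db.Tag.GetImage); err != nil {
        t.Error(err.Error())
    }
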
@@ -32,7 +32,7 @@ const (
	dbConnTimeout = 30
)

var appSchemaVersion uint = 44
var appSchemaVersion uint = 45

//go:embed migrations/*.sql
var migrationsBox embed.FS
@@ -64,12 +64,16 @@ func (e *MismatchedSchemaVersionError) Error() string {
}

type Database struct {
	Blobs     *BlobStore
	File      *FileStore
	Folder    *FolderStore
	Image     *ImageStore
	Gallery   *GalleryStore
	Scene     *SceneStore
	Performer *PerformerStore
	Studio    *studioQueryBuilder
	Tag       *tagQueryBuilder
	Movie     *movieQueryBuilder

	db     *sqlx.DB
	dbPath string
@@ -82,20 +86,29 @@ type Database struct {
func NewDatabase() *Database {
	fileStore := NewFileStore()
	folderStore := NewFolderStore()
	blobStore := NewBlobStore(BlobStoreOptions{})

	ret := &Database{
		Blobs:     blobStore,
		File:      fileStore,
		Folder:    folderStore,
		Scene:     NewSceneStore(fileStore),
		Scene:     NewSceneStore(fileStore, blobStore),
		Image:     NewImageStore(fileStore),
		Gallery:   NewGalleryStore(fileStore, folderStore),
		Performer: NewPerformerStore(),
		Performer: NewPerformerStore(blobStore),
		Studio:    NewStudioReaderWriter(blobStore),
		Tag:       NewTagReaderWriter(blobStore),
		Movie:     NewMovieReaderWriter(blobStore),
		lockChan:  make(chan struct{}, 1),
	}

	return ret
}

func (db *Database) SetBlobStoreOptions(options BlobStoreOptions) {
	*db.Blobs = *NewBlobStore(options)
}
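
SetBlobStoreOptions exists so startup code can swap in the configured storage once the config has been read; a sketch with assumed config accessor and constant names:

    // Assumed accessor/constant names; illustrative startup wiring.
    usingFS := cfg.GetBlobsStorage() == config.BlobsStorageTypeFilesystem
    db.SetBlobStoreOptions(sqlite.BlobStoreOptions{
        UseFilesystem: usingFS,
        UseDatabase:   !usingFS,
        Path:          cfg.GetBlobsPath(),
    })
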
// Ready returns an error if the database is not ready to begin transactions.
func (db *Database) Ready() error {
	if db.db == nil {
@@ -433,6 +446,12 @@ func (db *Database) optimise() {
	}
}

// Vacuum runs a VACUUM on the database, rebuilding the database file into a minimal amount of disk space.
func (db *Database) Vacuum(ctx context.Context) error {
	_, err := db.db.ExecContext(ctx, "VACUUM")
	return err
}

func (db *Database) runCustomMigrations(ctx context.Context, fns []customMigrationFunc) error {
	for _, fn := range fns {
		if err := db.runCustomMigration(ctx, fn); err != nil {

19
pkg/sqlite/migrations/45_blobs.up.sql
Normal file
@@ -0,0 +1,19 @@
CREATE TABLE `blobs` (
  `checksum` varchar(255) NOT NULL PRIMARY KEY,
  `blob` blob
);

ALTER TABLE `tags` ADD COLUMN `image_blob` varchar(255) REFERENCES `blobs`(`checksum`);
ALTER TABLE `studios` ADD COLUMN `image_blob` varchar(255) REFERENCES `blobs`(`checksum`);
ALTER TABLE `performers` ADD COLUMN `image_blob` varchar(255) REFERENCES `blobs`(`checksum`);
ALTER TABLE `scenes` ADD COLUMN `cover_blob` varchar(255) REFERENCES `blobs`(`checksum`);

ALTER TABLE `movies` ADD COLUMN `front_image_blob` varchar(255) REFERENCES `blobs`(`checksum`);
ALTER TABLE `movies` ADD COLUMN `back_image_blob` varchar(255) REFERENCES `blobs`(`checksum`);

-- performed in the post-migration
-- DROP TABLE `tags_image`;
-- DROP TABLE `studios_image`;
-- DROP TABLE `performers_image`;
-- DROP TABLE `scenes_cover`;
-- DROP TABLE `movies_images`;

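Entity rows now carry only a checksum pointing into blobs, so a raw read of, say, a tag image is a simple join. A sketch of the read path assuming direct database access (application code goes through BlobStore instead; note that with filesystem storage the blob column may be empty, with the bytes living under the blobs directory keyed by checksum):

    // hypothetical direct read, bypassing BlobStore
    var image []byte
    err := sqlDB.QueryRowContext(ctx,
        "SELECT b.blob FROM tags t INNER JOIN blobs b ON b.checksum = t.image_blob WHERE t.id = ?",
        tagID).Scan(&image)
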
286
pkg/sqlite/migrations/45_postmigrate.go
Normal file
@@ -0,0 +1,286 @@
package migrations

import (
    "context"
    "fmt"
    "strings"

    "github.com/jmoiron/sqlx"
    "github.com/stashapp/stash/internal/manager/config"
    "github.com/stashapp/stash/pkg/hash/md5"
    "github.com/stashapp/stash/pkg/logger"
    "github.com/stashapp/stash/pkg/sqlite"
    "github.com/stashapp/stash/pkg/utils"
)

type schema45Migrator struct {
    migrator
    hasBlobs bool
}

func post45(ctx context.Context, db *sqlx.DB) error {
    logger.Info("Running post-migration for schema version 45")

    m := schema45Migrator{
        migrator: migrator{
            db: db,
        },
    }

    if err := m.migrateImagesTable(ctx, migrateImagesTableOptions{
        joinTable: "tags_image",
        joinIDCol: "tag_id",
        destTable: "tags",
        cols: []migrateImageToBlobOptions{
            {
                joinImageCol: "image",
                destCol:      "image_blob",
            },
        },
    }); err != nil {
        return err
    }

    if err := m.migrateImagesTable(ctx, migrateImagesTableOptions{
        joinTable: "studios_image",
        joinIDCol: "studio_id",
        destTable: "studios",
        cols: []migrateImageToBlobOptions{
            {
                joinImageCol: "image",
                destCol:      "image_blob",
            },
        },
    }); err != nil {
        return err
    }

    if err := m.migrateImagesTable(ctx, migrateImagesTableOptions{
        joinTable: "performers_image",
        joinIDCol: "performer_id",
        destTable: "performers",
        cols: []migrateImageToBlobOptions{
            {
                joinImageCol: "image",
                destCol:      "image_blob",
            },
        },
    }); err != nil {
        return err
    }

    if err := m.migrateImagesTable(ctx, migrateImagesTableOptions{
        joinTable: "scenes_cover",
        joinIDCol: "scene_id",
        destTable: "scenes",
        cols: []migrateImageToBlobOptions{
            {
                joinImageCol: "cover",
                destCol:      "cover_blob",
            },
        },
    }); err != nil {
        return err
    }

    if err := m.migrateImagesTable(ctx, migrateImagesTableOptions{
        joinTable: "movies_images",
        joinIDCol: "movie_id",
        destTable: "movies",
        cols: []migrateImageToBlobOptions{
            {
                joinImageCol: "front_image",
                destCol:      "front_image_blob",
            },
            {
                joinImageCol: "back_image",
                destCol:      "back_image_blob",
            },
        },
    }); err != nil {
        return err
    }

    tablesToDrop := []string{
        "tags_image",
        "studios_image",
        "performers_image",
        "scenes_cover",
        "movies_images",
    }

    for _, table := range tablesToDrop {
        if err := m.dropTable(ctx, table); err != nil {
            return err
        }
    }

    if err := m.migrateConfig(ctx); err != nil {
        return err
    }

    return nil
}

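The post-migration is table-driven: each legacy image table is described by a migrateImagesTableOptions value, so extending it to another table would be one more call in the same shape. A hypothetical example (table and column names invented purely for illustration):

    if err := m.migrateImagesTable(ctx, migrateImagesTableOptions{
        joinTable: "galleries_cover", // hypothetical legacy table
        joinIDCol: "gallery_id",
        destTable: "galleries",
        cols: []migrateImageToBlobOptions{
            {joinImageCol: "cover", destCol: "cover_blob"},
        },
    }); err != nil {
        return err
    }
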
type migrateImageToBlobOptions struct {
    joinImageCol string
    destCol      string
}

type migrateImagesTableOptions struct {
    joinTable string
    joinIDCol string
    destTable string
    cols      []migrateImageToBlobOptions
}

func (o migrateImagesTableOptions) selectColumns() string {
    var cols []string
    for _, c := range o.cols {
        cols = append(cols, "`"+c.joinImageCol+"`")
    }

    return strings.Join(cols, ", ")
}

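For the movies options above, selectColumns produces the backtick-quoted column list that is interpolated into the batch SELECT:

    // cols = {front_image, back_image}
    // selectColumns() => "`front_image`, `back_image`"
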
func (m *schema45Migrator) migrateImagesTable(ctx context.Context, options migrateImagesTableOptions) error {
    logger.Infof("Moving %s to blobs table", options.joinTable)

    const (
        limit    = 1000
        logEvery = 10000
    )

    count := 0

    for {
        gotSome := false

        if err := m.withTxn(ctx, func(tx *sqlx.Tx) error {
            query := fmt.Sprintf("SELECT %s, %s FROM `%s`", options.joinIDCol, options.selectColumns(), options.joinTable)

            query += fmt.Sprintf(" LIMIT %d", limit)

            rows, err := m.db.Query(query)
            if err != nil {
                return err
            }
            defer rows.Close()

            for rows.Next() {
                m.hasBlobs = true

                var id int

                result := make([]interface{}, len(options.cols)+1)
                result[0] = &id
                for i := range options.cols {
                    v := []byte{}
                    result[i+1] = &v
                }

                err := rows.Scan(result...)
                if err != nil {
                    return err
                }

                gotSome = true
                count++

                for i, col := range options.cols {
                    image := result[i+1].(*[]byte)

                    if len(*image) > 0 {
                        if err := m.insertImage(*image, id, options.destTable, col.destCol); err != nil {
                            return err
                        }
                    }
                }

                // delete the row from the join table so we don't process it again
                deleteSQL := utils.StrFormat("DELETE FROM `{joinTable}` WHERE `{joinIDCol}` = ?", utils.StrFormatMap{
                    "joinTable": options.joinTable,
                    "joinIDCol": options.joinIDCol,
                })
                if _, err := m.db.Exec(deleteSQL, id); err != nil {
                    return err
                }
            }

            return rows.Err()
        }); err != nil {
            return err
        }

        if !gotSome {
            break
        }

        if count%logEvery == 0 {
            logger.Infof("Migrated %d images", count)
        }
    }

    return nil
}

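Note the paging strategy here: each pass selects a bare LIMIT batch and deletes every processed row inside the same transaction, so the next pass's LIMIT naturally yields the next batch without any OFFSET bookkeeping, and the loop terminates once a pass scans no rows. Because images are copied into blobs before the join-table row is deleted, an interrupted migration can simply resume from the remaining rows.
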
func (m *schema45Migrator) insertImage(data []byte, id int, destTable string, destCol string) error {
    // calculate checksum and insert into blobs table
    checksum := md5.FromBytes(data)

    if _, err := m.db.Exec("INSERT INTO `blobs` (`checksum`, `blob`) VALUES (?, ?) ON CONFLICT DO NOTHING", checksum, data); err != nil {
        return err
    }

    // point the destination column at the blob checksum
    updateSQL := utils.StrFormat("UPDATE `{destTable}` SET `{destCol}` = ? WHERE `id` = ?", utils.StrFormatMap{
        "destTable": destTable,
        "destCol":   destCol,
    })
    if _, err := m.db.Exec(updateSQL, checksum, id); err != nil {
        return err
    }

    return nil
}

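Because blobs are keyed by the MD5 of their content and inserted with ON CONFLICT DO NOTHING, identical images dedupe to a single blobs row; every entity using the same image simply stores the same checksum. This content-addressing property is what the new storage relies on throughout.
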
func (m *schema45Migrator) dropTable(ctx context.Context, table string) error {
    if err := m.withTxn(ctx, func(tx *sqlx.Tx) error {
        logger.Debugf("Dropping %s", table)
        _, err := m.db.Exec(fmt.Sprintf("DROP TABLE `%s`", table))
        return err
    }); err != nil {
        return err
    }

    return nil
}

func (m *schema45Migrator) migrateConfig(ctx context.Context) error {
    c := config.GetInstance()

    // if we don't have blobs, and storage is already set, then don't overwrite
    if !m.hasBlobs && c.GetBlobsStorage().IsValid() {
        logger.Infof("Blobs storage already set, not overwriting")
        return nil
    }

    // default to database storage if we migrated blobs into the database or
    // no blobs path is configured; otherwise default to filesystem storage
    defaultStorage := config.BlobStorageTypeFilesystem
    if m.hasBlobs || c.GetBlobsPath() == "" {
        defaultStorage = config.BlobStorageTypeDatabase
    }

    logger.Infof("Setting blobs storage to %s", defaultStorage.String())
    c.Set(config.BlobsStorage, defaultStorage)
    if err := c.Write(); err != nil {
        logger.Errorf("Error while writing configuration file: %s", err.Error())
    }

    return nil
}

func init() {
    sqlite.RegisterPostMigration(45, post45)
}

@@ -12,18 +12,30 @@ import (
    "github.com/stashapp/stash/pkg/sliceutil/intslice"
)

const movieTable = "movies"
const movieIDColumn = "movie_id"
const (
    movieTable    = "movies"
    movieIDColumn = "movie_id"

    movieFrontImageBlobColumn = "front_image_blob"
    movieBackImageBlobColumn  = "back_image_blob"
)

type movieQueryBuilder struct {
    repository
    blobJoinQueryBuilder
}

var MovieReaderWriter = &movieQueryBuilder{
    repository{
        tableName: movieTable,
        idColumn:  idColumn,
    },
func NewMovieReaderWriter(blobStore *BlobStore) *movieQueryBuilder {
    return &movieQueryBuilder{
        repository{
            tableName: movieTable,
            idColumn:  idColumn,
        },
        blobJoinQueryBuilder{
            blobStore: blobStore,
            joinTable: movieTable,
        },
    }
}

func (qb *movieQueryBuilder) Create(ctx context.Context, newObject models.Movie) (*models.Movie, error) {
@@ -54,6 +66,11 @@ func (qb *movieQueryBuilder) UpdateFull(ctx context.Context, updatedObject model
}

func (qb *movieQueryBuilder) Destroy(ctx context.Context, id int) error {
    // must handle image checksums manually
    if err := qb.destroyImages(ctx, id); err != nil {
        return err
    }

    return qb.destroyExisting(ctx, []int{id})
}

@@ -209,11 +226,9 @@ func movieIsMissingCriterionHandler(qb *movieQueryBuilder, isMissing *string) cr
    if isMissing != nil && *isMissing != "" {
        switch *isMissing {
        case "front_image":
            f.addLeftJoin("movies_images", "", "movies_images.movie_id = movies.id")
            f.addWhere("movies_images.front_image IS NULL")
            f.addWhere("movies.front_image_blob IS NULL")
        case "back_image":
            f.addLeftJoin("movies_images", "", "movies_images.movie_id = movies.id")
            f.addWhere("movies_images.back_image IS NULL")
            f.addWhere("movies.back_image_blob IS NULL")
        case "scenes":
            f.addLeftJoin("movies_scenes", "", "movies_scenes.movie_id = movies.id")
            f.addWhere("movies_scenes.scene_id IS NULL")
@@ -322,39 +337,31 @@ func (qb *movieQueryBuilder) queryMovies(ctx context.Context, query string, args
    return []*models.Movie(ret), nil
}

func (qb *movieQueryBuilder) UpdateImages(ctx context.Context, movieID int, frontImage []byte, backImage []byte) error {
    // Delete the existing cover and then create new
    if err := qb.DestroyImages(ctx, movieID); err != nil {
        return err
    }

    _, err := qb.tx.Exec(ctx,
        `INSERT INTO movies_images (movie_id, front_image, back_image) VALUES (?, ?, ?)`,
        movieID,
        frontImage,
        backImage,
    )

    return err
func (qb *movieQueryBuilder) UpdateFrontImage(ctx context.Context, movieID int, frontImage []byte) error {
    return qb.UpdateImage(ctx, movieID, movieFrontImageBlobColumn, frontImage)
}

func (qb *movieQueryBuilder) DestroyImages(ctx context.Context, movieID int) error {
    // Delete the existing joins
    _, err := qb.tx.Exec(ctx, "DELETE FROM movies_images WHERE movie_id = ?", movieID)
    if err != nil {
func (qb *movieQueryBuilder) UpdateBackImage(ctx context.Context, movieID int, backImage []byte) error {
    return qb.UpdateImage(ctx, movieID, movieBackImageBlobColumn, backImage)
}

func (qb *movieQueryBuilder) destroyImages(ctx context.Context, movieID int) error {
    if err := qb.DestroyImage(ctx, movieID, movieFrontImageBlobColumn); err != nil {
        return err
    }
    return err
    if err := qb.DestroyImage(ctx, movieID, movieBackImageBlobColumn); err != nil {
        return err
    }

    return nil
}

func (qb *movieQueryBuilder) GetFrontImage(ctx context.Context, movieID int) ([]byte, error) {
    query := `SELECT front_image from movies_images WHERE movie_id = ?`
    return getImage(ctx, qb.tx, query, movieID)
    return qb.GetImage(ctx, movieID, movieFrontImageBlobColumn)
}

func (qb *movieQueryBuilder) GetBackImage(ctx context.Context, movieID int) ([]byte, error) {
    query := `SELECT back_image from movies_images WHERE movie_id = ?`
    return getImage(ctx, qb.tx, query, movieID)
    return qb.GetImage(ctx, movieID, movieBackImageBlobColumn)
}

func (qb *movieQueryBuilder) FindByPerformerID(ctx context.Context, performerID int) ([]*models.Movie, error) {

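blobJoinQueryBuilder itself is defined elsewhere in this change; from its call sites its shape is roughly the following (the struct fields appear in the constructors above; the method behaviour is inferred, not shown in this diff):

    type blobJoinQueryBuilder struct {
        blobStore *BlobStore
        joinTable string
    }

    // GetImage(ctx, id, blobCol)           - reads joinTable.blobCol and fetches the bytes from the blob store by checksum
    // HasImage(ctx, id, blobCol)           - reports whether joinTable.blobCol is set
    // UpdateImage(ctx, id, blobCol, image) - stores the blob keyed by checksum and points joinTable.blobCol at it; nil clears it
    // DestroyImage(ctx, id, blobCol)       - clears joinTable.blobCol
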
@@ -15,12 +15,11 @@ import (

    "github.com/stashapp/stash/pkg/hash/md5"
    "github.com/stashapp/stash/pkg/models"
    "github.com/stashapp/stash/pkg/sqlite"
)

func TestMovieFindByName(t *testing.T) {
    withTxn(func(ctx context.Context) error {
        mqb := sqlite.MovieReaderWriter
        mqb := db.Movie

        name := movieNames[movieIdxWithScene] // find a movie by name

@@ -53,7 +52,7 @@ func TestMovieFindByNames(t *testing.T) {
    withTxn(func(ctx context.Context) error {
        var names []string

        mqb := sqlite.MovieReaderWriter
        mqb := db.Movie

        names = append(names, movieNames[movieIdxWithScene]) // find movies by names

@@ -76,9 +75,80 @@ func TestMovieFindByNames(t *testing.T) {
    })
}

func moviesToIDs(i []*models.Movie) []int {
    ret := make([]int, len(i))
    for i, v := range i {
        ret[i] = v.ID
    }

    return ret
}

func TestMovieQuery(t *testing.T) {
    var (
        frontImage = "front_image"
        backImage  = "back_image"
    )

    tests := []struct {
        name        string
        findFilter  *models.FindFilterType
        filter      *models.MovieFilterType
        includeIdxs []int
        excludeIdxs []int
        wantErr     bool
    }{
        {
            "is missing front image",
            nil,
            &models.MovieFilterType{
                IsMissing: &frontImage,
            },
            // just ensure that it doesn't error
            nil,
            nil,
            false,
        },
        {
            "is missing back image",
            nil,
            &models.MovieFilterType{
                IsMissing: &backImage,
            },
            // just ensure that it doesn't error
            nil,
            nil,
            false,
        },
    }

    for _, tt := range tests {
        runWithRollbackTxn(t, tt.name, func(t *testing.T, ctx context.Context) {
            assert := assert.New(t)

            results, _, err := db.Movie.Query(ctx, tt.filter, tt.findFilter)
            if (err != nil) != tt.wantErr {
                t.Errorf("MovieQueryBuilder.Query() error = %v, wantErr %v", err, tt.wantErr)
                return
            }

            ids := moviesToIDs(results)
            include := indexesToIDs(performerIDs, tt.includeIdxs)
            exclude := indexesToIDs(performerIDs, tt.excludeIdxs)

            for _, i := range include {
                assert.Contains(ids, i)
            }
            for _, e := range exclude {
                assert.NotContains(ids, e)
            }
        })
    }
}

func TestMovieQueryStudio(t *testing.T) {
    withTxn(func(ctx context.Context) error {
        mqb := sqlite.MovieReaderWriter
        mqb := db.Movie
        studioCriterion := models.HierarchicalMultiCriterionInput{
            Value: []string{
                strconv.Itoa(studioIDs[studioIdxWithMovie]),
@@ -163,7 +233,7 @@ func TestMovieQueryURL(t *testing.T) {
func verifyMovieQuery(t *testing.T, filter models.MovieFilterType, verifyFn func(s *models.Movie)) {
    withTxn(func(ctx context.Context) error {
        t.Helper()
        sqb := sqlite.MovieReaderWriter
        sqb := db.Movie

        movies := queryMovie(ctx, t, sqb, &filter, nil)

@@ -196,7 +266,7 @@ func TestMovieQuerySorting(t *testing.T) {
    }

    withTxn(func(ctx context.Context) error {
        sqb := sqlite.MovieReaderWriter
        sqb := db.Movie
        movies := queryMovie(ctx, t, sqb, nil, &findFilter)

        // scenes should be in same order as indexes
@@ -216,122 +286,50 @@ func TestMovieQuerySorting(t *testing.T) {
    })
}

func TestMovieUpdateMovieImages(t *testing.T) {
    if err := withTxn(func(ctx context.Context) error {
        mqb := sqlite.MovieReaderWriter
func TestMovieUpdateFrontImage(t *testing.T) {
    if err := withRollbackTxn(func(ctx context.Context) error {
        qb := db.Movie

        // create movie to test against
        const name = "TestMovieUpdateMovieImages"
        movie := models.Movie{
        toCreate := models.Movie{
            Name:     sql.NullString{String: name, Valid: true},
            Checksum: md5.FromString(name),
        }
        created, err := mqb.Create(ctx, movie)
        movie, err := qb.Create(ctx, toCreate)
        if err != nil {
            return fmt.Errorf("Error creating movie: %s", err.Error())
        }

        frontImage := []byte("frontImage")
        backImage := []byte("backImage")
        err = mqb.UpdateImages(ctx, created.ID, frontImage, backImage)
        if err != nil {
            return fmt.Errorf("Error updating movie images: %s", err.Error())
        }

        // ensure images are set
        storedFront, err := mqb.GetFrontImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error getting front image: %s", err.Error())
        }
        assert.Equal(t, storedFront, frontImage)

        storedBack, err := mqb.GetBackImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error getting back image: %s", err.Error())
        }
        assert.Equal(t, storedBack, backImage)

        // set front image only
        newImage := []byte("newImage")
        err = mqb.UpdateImages(ctx, created.ID, newImage, nil)
        if err != nil {
            return fmt.Errorf("Error updating movie images: %s", err.Error())
        }

        storedFront, err = mqb.GetFrontImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error getting front image: %s", err.Error())
        }
        assert.Equal(t, storedFront, newImage)

        // back image should be nil
        storedBack, err = mqb.GetBackImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error getting back image: %s", err.Error())
        }
        assert.Nil(t, nil)

        // set back image only
        err = mqb.UpdateImages(ctx, created.ID, nil, newImage)
        if err == nil {
            return fmt.Errorf("Expected error setting nil front image")
        }

        return nil
        return testUpdateImage(t, ctx, movie.ID, qb.UpdateFrontImage, qb.GetFrontImage)
    }); err != nil {
        t.Error(err.Error())
    }
}

func TestMovieDestroyMovieImages(t *testing.T) {
    if err := withTxn(func(ctx context.Context) error {
        mqb := sqlite.MovieReaderWriter
func TestMovieUpdateBackImage(t *testing.T) {
    if err := withRollbackTxn(func(ctx context.Context) error {
        qb := db.Movie

        // create movie to test against
        const name = "TestMovieDestroyMovieImages"
        movie := models.Movie{
        const name = "TestMovieUpdateMovieImages"
        toCreate := models.Movie{
            Name:     sql.NullString{String: name, Valid: true},
            Checksum: md5.FromString(name),
        }
        created, err := mqb.Create(ctx, movie)
        movie, err := qb.Create(ctx, toCreate)
        if err != nil {
            return fmt.Errorf("Error creating movie: %s", err.Error())
        }

        frontImage := []byte("frontImage")
        backImage := []byte("backImage")
        err = mqb.UpdateImages(ctx, created.ID, frontImage, backImage)
        if err != nil {
            return fmt.Errorf("Error updating movie images: %s", err.Error())
        }

        err = mqb.DestroyImages(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error destroying movie images: %s", err.Error())
        }

        // front image should be nil
        storedFront, err := mqb.GetFrontImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error getting front image: %s", err.Error())
        }
        assert.Nil(t, storedFront)

        // back image should be nil
        storedBack, err := mqb.GetBackImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error getting back image: %s", err.Error())
        }
        assert.Nil(t, storedBack)

        return nil
        return testUpdateImage(t, ctx, movie.ID, qb.UpdateBackImage, qb.GetBackImage)
    }); err != nil {
        t.Error(err.Error())
    }
}

// TODO Update
// TODO Destroy
// TODO Destroy - ensure image is destroyed
// TODO Find
// TODO Count
// TODO All

@@ -23,7 +23,8 @@ const (
    performersAliasesTable = "performer_aliases"
    performerAliasColumn   = "alias"
    performersTagsTable    = "performers_tags"
    performersImageTable   = "performers_image" // performer cover image

    performerImageBlobColumn = "image_blob"
)

type performerRow struct {
@@ -54,6 +55,9 @@ type performerRow struct {
    HairColor     zero.String `db:"hair_color"`
    Weight        null.Int    `db:"weight"`
    IgnoreAutoTag bool        `db:"ignore_auto_tag"`

    // not used for resolution
    ImageBlob zero.String `db:"image_blob"`
}

func (r *performerRow) fromPerformer(o models.Performer) {
@@ -159,16 +163,21 @@ func (r *performerRowRecord) fromPartial(o models.PerformerPartial) {

type PerformerStore struct {
    repository
    blobJoinQueryBuilder

    tableMgr *table
}

func NewPerformerStore() *PerformerStore {
func NewPerformerStore(blobStore *BlobStore) *PerformerStore {
    return &PerformerStore{
        repository: repository{
            tableName: performerTable,
            idColumn:  idColumn,
        },
        blobJoinQueryBuilder: blobJoinQueryBuilder{
            blobStore: blobStore,
            joinTable: performerTable,
        },
        tableMgr: performerTableMgr,
    }
}
@@ -275,6 +284,11 @@ func (qb *PerformerStore) Update(ctx context.Context, updatedObject *models.Perf
}

func (qb *PerformerStore) Destroy(ctx context.Context, id int) error {
    // must handle image checksums manually
    if err := qb.DestroyImage(ctx, id); err != nil {
        return err
    }

    return qb.destroyExisting(ctx, []int{id})
}

@@ -690,8 +704,7 @@ func performerIsMissingCriterionHandler(qb *PerformerStore, isMissing *string) c
            f.addLeftJoin(performersScenesTable, "scenes_join", "scenes_join.performer_id = performers.id")
            f.addWhere("scenes_join.scene_id IS NULL")
        case "image":
            f.addLeftJoin(performersImageTable, "image_join", "image_join.performer_id = performers.id")
            f.addWhere("image_join.performer_id IS NULL")
            f.addWhere("performers.image_blob IS NULL")
        case "stash_id":
            performersStashIDsTableMgr.join(f, "performer_stash_ids", "performers.id")
            f.addWhere("performer_stash_ids.performer_id IS NULL")
@@ -911,27 +924,16 @@ func (qb *PerformerStore) GetTagIDs(ctx context.Context, id int) ([]int, error)
    return qb.tagsRepository().getIDs(ctx, id)
}

func (qb *PerformerStore) imageRepository() *imageRepository {
    return &imageRepository{
        repository: repository{
            tx:        qb.tx,
            tableName: "performers_image",
            idColumn:  performerIDColumn,
        },
        imageColumn: "image",
    }
}

func (qb *PerformerStore) GetImage(ctx context.Context, performerID int) ([]byte, error) {
    return qb.imageRepository().get(ctx, performerID)
    return qb.blobJoinQueryBuilder.GetImage(ctx, performerID, performerImageBlobColumn)
}

func (qb *PerformerStore) UpdateImage(ctx context.Context, performerID int, image []byte) error {
    return qb.imageRepository().replace(ctx, performerID, image)
    return qb.blobJoinQueryBuilder.UpdateImage(ctx, performerID, performerImageBlobColumn, image)
}

func (qb *PerformerStore) DestroyImage(ctx context.Context, performerID int) error {
    return qb.imageRepository().destroy(ctx, []int{performerID})
    return qb.blobJoinQueryBuilder.DestroyImage(ctx, performerID, performerImageBlobColumn)
}

func (qb *PerformerStore) stashIDRepository() *stashIDRepository {

@@ -1029,26 +1029,7 @@ func TestPerformerUpdatePerformerImage(t *testing.T) {
            return fmt.Errorf("Error creating performer: %s", err.Error())
        }

        image := []byte("image")
        err = qb.UpdateImage(ctx, performer.ID, image)
        if err != nil {
            return fmt.Errorf("Error updating performer image: %s", err.Error())
        }

        // ensure image set
        storedImage, err := qb.GetImage(ctx, performer.ID)
        if err != nil {
            return fmt.Errorf("Error getting image: %s", err.Error())
        }
        assert.Equal(t, storedImage, image)

        // set nil image
        err = qb.UpdateImage(ctx, performer.ID, nil)
        if err == nil {
            return fmt.Errorf("Expected error setting nil image")
        }

        return nil
        return testUpdateImage(t, ctx, performer.ID, qb.UpdateImage, qb.GetImage)
    }); err != nil {
        t.Error(err.Error())
    }

@@ -387,29 +387,6 @@ func (r *joinRepository) replace(ctx context.Context, id int, foreignIDs []int)
    return nil
}

type imageRepository struct {
    repository
    imageColumn string
}

func (r *imageRepository) get(ctx context.Context, id int) ([]byte, error) {
    query := fmt.Sprintf("SELECT %s from %s WHERE %s = ?", r.imageColumn, r.tableName, r.idColumn)
    var ret []byte
    err := r.querySimple(ctx, query, []interface{}{id}, &ret)
    return ret, err
}

func (r *imageRepository) replace(ctx context.Context, id int, image []byte) error {
    if err := r.destroy(ctx, []int{id}); err != nil {
        return err
    }

    stmt := fmt.Sprintf("INSERT INTO %s (%s, %s) VALUES (?, ?)", r.tableName, r.idColumn, r.imageColumn)
    _, err := r.tx.Exec(ctx, stmt, id, image)

    return err
}

type captionRepository struct {
    repository
}

@@ -31,6 +31,8 @@ const (
    scenesTagsTable      = "scenes_tags"
    scenesGalleriesTable = "scenes_galleries"
    moviesScenesTable    = "movies_scenes"

    sceneCoverBlobColumn = "cover_blob"
)

var findExactDuplicateQuery = `
@@ -72,6 +74,9 @@ type sceneRow struct {
    ResumeTime   float64 `db:"resume_time"`
    PlayDuration float64 `db:"play_duration"`
    PlayCount    int     `db:"play_count"`

    // not used in resolutions or updates
    CoverBlob zero.String `db:"cover_blob"`
}

func (r *sceneRow) fromScene(o models.Scene) {
@@ -172,6 +177,7 @@ func (r *sceneRowRecord) fromPartial(o models.ScenePartial) {

type SceneStore struct {
    repository
    blobJoinQueryBuilder

    tableMgr *table
    oCounterManager
@@ -179,12 +185,16 @@ type SceneStore struct {
    fileStore *FileStore
}

func NewSceneStore(fileStore *FileStore) *SceneStore {
func NewSceneStore(fileStore *FileStore, blobStore *BlobStore) *SceneStore {
    return &SceneStore{
        repository: repository{
            tableName: sceneTable,
            idColumn:  idColumn,
        },
        blobJoinQueryBuilder: blobJoinQueryBuilder{
            blobStore: blobStore,
            joinTable: sceneTable,
        },

        tableMgr:        sceneTableMgr,
        oCounterManager: oCounterManager{sceneTableMgr},
@@ -353,6 +363,11 @@ func (qb *SceneStore) Update(ctx context.Context, updatedObject *models.Scene) e
}

func (qb *SceneStore) Destroy(ctx context.Context, id int) error {
    // must handle image checksums manually
    if err := qb.destroyCover(ctx, id); err != nil {
        return err
    }

    // scene markers should be handled prior to calling destroy
    // galleries should be handled prior to calling destroy

@@ -1187,6 +1202,8 @@ func sceneIsMissingCriterionHandler(qb *SceneStore, isMissing *string) criterion
            qb.addSceneFilesTable(f)
            f.addLeftJoin(fingerprintTable, "fingerprints_phash", "scenes_files.file_id = fingerprints_phash.file_id AND fingerprints_phash.type = 'phash'")
            f.addWhere("fingerprints_phash.fingerprint IS NULL")
        case "cover":
            f.addWhere("scenes.cover_blob IS NULL")
        default:
            f.addWhere("(scenes." + *isMissing + " IS NULL OR TRIM(scenes." + *isMissing + ") = '')")
        }
@@ -1464,17 +1481,6 @@ func (qb *SceneStore) setSceneSort(query *queryBuilder, findFilter *models.FindF
    }
}

func (qb *SceneStore) imageRepository() *imageRepository {
    return &imageRepository{
        repository: repository{
            tx:        qb.tx,
            tableName: "scenes_cover",
            idColumn:  sceneIDColumn,
        },
        imageColumn: "cover",
    }
}

func (qb *SceneStore) getPlayCount(ctx context.Context, id int) (int, error) {
    q := dialect.From(qb.tableMgr.table).Select("play_count").Where(goqu.Ex{"id": id})

@@ -1532,15 +1538,19 @@ func (qb *SceneStore) IncrementWatchCount(ctx context.Context, id int) (int, err
}

func (qb *SceneStore) GetCover(ctx context.Context, sceneID int) ([]byte, error) {
    return qb.imageRepository().get(ctx, sceneID)
    return qb.GetImage(ctx, sceneID, sceneCoverBlobColumn)
}

func (qb *SceneStore) HasCover(ctx context.Context, sceneID int) (bool, error) {
    return qb.HasImage(ctx, sceneID, sceneCoverBlobColumn)
}

func (qb *SceneStore) UpdateCover(ctx context.Context, sceneID int, image []byte) error {
    return qb.imageRepository().replace(ctx, sceneID, image)
    return qb.UpdateImage(ctx, sceneID, sceneCoverBlobColumn, image)
}

func (qb *SceneStore) DestroyCover(ctx context.Context, sceneID int) error {
    return qb.imageRepository().destroy(ctx, []int{sceneID})
func (qb *SceneStore) destroyCover(ctx context.Context, sceneID int) error {
    return qb.DestroyImage(ctx, sceneID, sceneCoverBlobColumn)
}

func (qb *SceneStore) AssignFiles(ctx context.Context, sceneID int, fileIDs []file.ID) error {

@@ -4088,53 +4088,7 @@ func TestSceneUpdateSceneCover(t *testing.T) {

        sceneID := sceneIDs[sceneIdxWithGallery]

        image := []byte("image")
        if err := qb.UpdateCover(ctx, sceneID, image); err != nil {
            return fmt.Errorf("Error updating scene cover: %s", err.Error())
        }

        // ensure image set
        storedImage, err := qb.GetCover(ctx, sceneID)
        if err != nil {
            return fmt.Errorf("Error getting image: %s", err.Error())
        }
        assert.Equal(t, storedImage, image)

        // set nil image
        err = qb.UpdateCover(ctx, sceneID, nil)
        if err == nil {
            return fmt.Errorf("Expected error setting nil image")
        }

        return nil
    }); err != nil {
        t.Error(err.Error())
    }
}

func TestSceneDestroySceneCover(t *testing.T) {
    if err := withTxn(func(ctx context.Context) error {
        qb := db.Scene

        sceneID := sceneIDs[sceneIdxWithGallery]

        image := []byte("image")
        if err := qb.UpdateCover(ctx, sceneID, image); err != nil {
            return fmt.Errorf("Error updating scene image: %s", err.Error())
        }

        if err := qb.DestroyCover(ctx, sceneID); err != nil {
            return fmt.Errorf("Error destroying scene cover: %s", err.Error())
        }

        // image should be nil
        storedImage, err := qb.GetCover(ctx, sceneID)
        if err != nil {
            return fmt.Errorf("Error getting image: %s", err.Error())
        }
        assert.Nil(t, storedImage)

        return nil
        return testUpdateImage(t, ctx, sceneID, qb.UpdateCover, qb.GetCover)
    }); err != nil {
        t.Error(err.Error())
    }

@@ -537,6 +537,10 @@ func runTests(m *testing.M) int {
    f.Close()
    databaseFile := f.Name()
    db = sqlite.NewDatabase()
    db.SetBlobStoreOptions(sqlite.BlobStoreOptions{
        UseDatabase: true,
        // don't use filesystem
    })

    if err := db.Open(databaseFile); err != nil {
        panic(fmt.Sprintf("Could not initialize database: %s", err.Error()))
@@ -566,11 +570,11 @@ func populateDB() error {

        // TODO - link folders to zip files

        if err := createMovies(ctx, sqlite.MovieReaderWriter, moviesNameCase, moviesNameNoCase); err != nil {
        if err := createMovies(ctx, db.Movie, moviesNameCase, moviesNameNoCase); err != nil {
            return fmt.Errorf("error creating movies: %s", err.Error())
        }

        if err := createTags(ctx, sqlite.TagReaderWriter, tagsNameCase, tagsNameNoCase); err != nil {
        if err := createTags(ctx, db.Tag, tagsNameCase, tagsNameNoCase); err != nil {
            return fmt.Errorf("error creating tags: %s", err.Error())
        }

@@ -578,7 +582,7 @@ func populateDB() error {
            return fmt.Errorf("error creating performers: %s", err.Error())
        }

        if err := createStudios(ctx, sqlite.StudioReaderWriter, studiosNameCase, studiosNameNoCase); err != nil {
        if err := createStudios(ctx, db.Studio, studiosNameCase, studiosNameNoCase); err != nil {
            return fmt.Errorf("error creating studios: %s", err.Error())
        }

@@ -594,7 +598,7 @@ func populateDB() error {
            return fmt.Errorf("error creating images: %s", err.Error())
        }

        if err := addTagImage(ctx, sqlite.TagReaderWriter, tagIdxWithCoverImage); err != nil {
        if err := addTagImage(ctx, db.Tag, tagIdxWithCoverImage); err != nil {
            return fmt.Errorf("error adding tag image: %s", err.Error())
        }

@@ -602,15 +606,15 @@ func populateDB() error {
            return fmt.Errorf("error creating saved filters: %s", err.Error())
        }

        if err := linkMovieStudios(ctx, sqlite.MovieReaderWriter); err != nil {
        if err := linkMovieStudios(ctx, db.Movie); err != nil {
            return fmt.Errorf("error linking movie studios: %s", err.Error())
        }

        if err := linkStudiosParent(ctx, sqlite.StudioReaderWriter); err != nil {
        if err := linkStudiosParent(ctx, db.Studio); err != nil {
            return fmt.Errorf("error linking studios parent: %s", err.Error())
        }

        if err := linkTagsParent(ctx, sqlite.TagReaderWriter); err != nil {
        if err := linkTagsParent(ctx, db.Tag); err != nil {
            return fmt.Errorf("error linking tags parent: %s", err.Error())
        }

@@ -1,9 +1,6 @@
package sqlite

import (
    "context"
    "database/sql"
    "errors"
    "fmt"
    "math/rand"
    "regexp"
@@ -290,28 +287,6 @@ func getCountCriterionClause(primaryTable, joinTable, primaryFK string, criterio
    return getIntCriterionWhereClause(lhs, criterion)
}

func getImage(ctx context.Context, tx dbWrapper, query string, args ...interface{}) ([]byte, error) {
    rows, err := tx.Queryx(ctx, query, args...)

    if err != nil && !errors.Is(err, sql.ErrNoRows) {
        return nil, err
    }
    defer rows.Close()

    var ret []byte
    if rows.Next() {
        if err := rows.Scan(&ret); err != nil {
            return nil, err
        }
    }

    if err := rows.Err(); err != nil {
        return nil, err
    }

    return ret, nil
}

func coalesce(column string) string {
    return fmt.Sprintf("COALESCE(%s, '')", column)
}

@@ -13,20 +13,31 @@ import (
    "github.com/stashapp/stash/pkg/sliceutil/intslice"
)

const studioTable = "studios"
const studioIDColumn = "studio_id"
const studioAliasesTable = "studio_aliases"
const studioAliasColumn = "alias"
const (
    studioTable        = "studios"
    studioIDColumn     = "studio_id"
    studioAliasesTable = "studio_aliases"
    studioAliasColumn  = "alias"

    studioImageBlobColumn = "image_blob"
)

type studioQueryBuilder struct {
    repository
    blobJoinQueryBuilder
}

var StudioReaderWriter = &studioQueryBuilder{
    repository{
        tableName: studioTable,
        idColumn:  idColumn,
    },
func NewStudioReaderWriter(blobStore *BlobStore) *studioQueryBuilder {
    return &studioQueryBuilder{
        repository{
            tableName: studioTable,
            idColumn:  idColumn,
        },
        blobJoinQueryBuilder{
            blobStore: blobStore,
            joinTable: studioTable,
        },
    }
}

func (qb *studioQueryBuilder) Create(ctx context.Context, newObject models.Studio) (*models.Studio, error) {
@@ -57,6 +68,11 @@ func (qb *studioQueryBuilder) UpdateFull(ctx context.Context, updatedObject mode
}

func (qb *studioQueryBuilder) Destroy(ctx context.Context, id int) error {
    // must handle image checksums manually
    if err := qb.destroyImage(ctx, id); err != nil {
        return err
    }

    // TODO - set null on foreign key in scraped items
    // remove studio from scraped items
    _, err := qb.tx.Exec(ctx, "UPDATE scraped_items SET studio_id = null WHERE studio_id = ?", id)
@@ -428,31 +444,20 @@ func (qb *studioQueryBuilder) queryStudios(ctx context.Context, query string, ar
    return []*models.Studio(ret), nil
}

func (qb *studioQueryBuilder) imageRepository() *imageRepository {
    return &imageRepository{
        repository: repository{
            tx:        qb.tx,
            tableName: "studios_image",
            idColumn:  studioIDColumn,
        },
        imageColumn: "image",
    }
}

func (qb *studioQueryBuilder) GetImage(ctx context.Context, studioID int) ([]byte, error) {
    return qb.imageRepository().get(ctx, studioID)
    return qb.blobJoinQueryBuilder.GetImage(ctx, studioID, studioImageBlobColumn)
}

func (qb *studioQueryBuilder) HasImage(ctx context.Context, studioID int) (bool, error) {
    return qb.imageRepository().exists(ctx, studioID)
    return qb.blobJoinQueryBuilder.HasImage(ctx, studioID, studioImageBlobColumn)
}

func (qb *studioQueryBuilder) UpdateImage(ctx context.Context, studioID int, image []byte) error {
    return qb.imageRepository().replace(ctx, studioID, image)
    return qb.blobJoinQueryBuilder.UpdateImage(ctx, studioID, studioImageBlobColumn, image)
}

func (qb *studioQueryBuilder) DestroyImage(ctx context.Context, studioID int) error {
    return qb.imageRepository().destroy(ctx, []int{studioID})
func (qb *studioQueryBuilder) destroyImage(ctx context.Context, studioID int) error {
    return qb.blobJoinQueryBuilder.DestroyImage(ctx, studioID, studioImageBlobColumn)
}

func (qb *studioQueryBuilder) stashIDRepository() *stashIDRepository {

@@ -14,13 +14,12 @@ import (
    "testing"

    "github.com/stashapp/stash/pkg/models"
    "github.com/stashapp/stash/pkg/sqlite"
    "github.com/stretchr/testify/assert"
)

func TestStudioFindByName(t *testing.T) {
    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        name := studioNames[studioIdxWithScene] // find a studio by name

@@ -70,7 +69,7 @@ func TestStudioQueryNameOr(t *testing.T) {
    }

    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        studios := queryStudio(ctx, t, sqb, &studioFilter, nil)

@@ -101,7 +100,7 @@ func TestStudioQueryNameAndUrl(t *testing.T) {
    }

    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        studios := queryStudio(ctx, t, sqb, &studioFilter, nil)

@@ -136,7 +135,7 @@ func TestStudioQueryNameNotUrl(t *testing.T) {
    }

    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        studios := queryStudio(ctx, t, sqb, &studioFilter, nil)

@@ -167,7 +166,7 @@ func TestStudioIllegalQuery(t *testing.T) {
    }

    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        _, _, err := sqb.Query(ctx, studioFilter, nil)
        assert.NotNil(err)
@@ -193,7 +192,7 @@ func TestStudioQueryIgnoreAutoTag(t *testing.T) {
            IgnoreAutoTag: &ignoreAutoTag,
        }

        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        studios := queryStudio(ctx, t, sqb, &studioFilter, nil)

@@ -208,7 +207,7 @@ func TestStudioQueryIgnoreAutoTag(t *testing.T) {

func TestStudioQueryForAutoTag(t *testing.T) {
    withTxn(func(ctx context.Context) error {
        tqb := sqlite.StudioReaderWriter
        tqb := db.Studio

        name := studioNames[studioIdxWithMovie] // find a studio by name

@@ -239,7 +238,7 @@ func TestStudioQueryForAutoTag(t *testing.T) {

func TestStudioQueryParent(t *testing.T) {
    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio
        studioCriterion := models.MultiCriterionInput{
            Value: []string{
                strconv.Itoa(studioIDs[studioIdxWithChildStudio]),
@@ -289,18 +288,18 @@ func TestStudioDestroyParent(t *testing.T) {

    // create parent and child studios
    if err := withTxn(func(ctx context.Context) error {
        createdParent, err := createStudio(ctx, sqlite.StudioReaderWriter, parentName, nil)
        createdParent, err := createStudio(ctx, db.Studio, parentName, nil)
        if err != nil {
            return fmt.Errorf("Error creating parent studio: %s", err.Error())
        }

        parentID := int64(createdParent.ID)
        createdChild, err := createStudio(ctx, sqlite.StudioReaderWriter, childName, &parentID)
        createdChild, err := createStudio(ctx, db.Studio, childName, &parentID)
        if err != nil {
            return fmt.Errorf("Error creating child studio: %s", err.Error())
        }

        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        // destroy the parent
        err = sqb.Destroy(ctx, createdParent.ID)
@@ -322,7 +321,7 @@ func TestStudioDestroyParent(t *testing.T) {

func TestStudioFindChildren(t *testing.T) {
    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        studios, err := sqb.FindChildren(ctx, studioIDs[studioIdxWithChildStudio])

@@ -351,18 +350,18 @@ func TestStudioUpdateClearParent(t *testing.T) {

    // create parent and child studios
    if err := withTxn(func(ctx context.Context) error {
        createdParent, err := createStudio(ctx, sqlite.StudioReaderWriter, parentName, nil)
        createdParent, err := createStudio(ctx, db.Studio, parentName, nil)
        if err != nil {
            return fmt.Errorf("Error creating parent studio: %s", err.Error())
        }

        parentID := int64(createdParent.ID)
        createdChild, err := createStudio(ctx, sqlite.StudioReaderWriter, childName, &parentID)
        createdChild, err := createStudio(ctx, db.Studio, childName, &parentID)
        if err != nil {
            return fmt.Errorf("Error creating child studio: %s", err.Error())
        }

        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        // clear the parent id from the child
        updatePartial := models.StudioPartial{
@@ -388,70 +387,16 @@ func TestStudioUpdateClearParent(t *testing.T) {

func TestStudioUpdateStudioImage(t *testing.T) {
    if err := withTxn(func(ctx context.Context) error {
        qb := sqlite.StudioReaderWriter
        qb := db.Studio

        // create performer to test against
        // create studio to test against
        const name = "TestStudioUpdateStudioImage"
        created, err := createStudio(ctx, sqlite.StudioReaderWriter, name, nil)
        created, err := createStudio(ctx, db.Studio, name, nil)
        if err != nil {
            return fmt.Errorf("Error creating studio: %s", err.Error())
        }

        image := []byte("image")
        err = qb.UpdateImage(ctx, created.ID, image)
        if err != nil {
            return fmt.Errorf("Error updating studio image: %s", err.Error())
        }

        // ensure image set
        storedImage, err := qb.GetImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error getting image: %s", err.Error())
        }
        assert.Equal(t, storedImage, image)

        // set nil image
        err = qb.UpdateImage(ctx, created.ID, nil)
        if err == nil {
            return fmt.Errorf("Expected error setting nil image")
        }

        return nil
    }); err != nil {
        t.Error(err.Error())
    }
}

func TestStudioDestroyStudioImage(t *testing.T) {
    if err := withTxn(func(ctx context.Context) error {
        qb := sqlite.StudioReaderWriter

        // create performer to test against
        const name = "TestStudioDestroyStudioImage"
        created, err := createStudio(ctx, sqlite.StudioReaderWriter, name, nil)
        if err != nil {
            return fmt.Errorf("Error creating studio: %s", err.Error())
        }

        image := []byte("image")
        err = qb.UpdateImage(ctx, created.ID, image)
        if err != nil {
            return fmt.Errorf("Error updating studio image: %s", err.Error())
        }

        err = qb.DestroyImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error destroying studio image: %s", err.Error())
        }

        // image should be nil
        storedImage, err := qb.GetImage(ctx, created.ID)
        if err != nil {
            return fmt.Errorf("Error getting image: %s", err.Error())
        }
        assert.Nil(t, storedImage)

        return nil
        return testUpdateImage(t, ctx, created.ID, qb.UpdateImage, qb.GetImage)
    }); err != nil {
        t.Error(err.Error())
    }
@@ -478,7 +423,7 @@ func TestStudioQuerySceneCount(t *testing.T) {

func verifyStudiosSceneCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) {
    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio
        studioFilter := models.StudioFilterType{
            SceneCount: &sceneCountCriterion,
        }
@@ -519,7 +464,7 @@ func TestStudioQueryImageCount(t *testing.T) {

func verifyStudiosImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) {
    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio
        studioFilter := models.StudioFilterType{
            ImageCount: &imageCountCriterion,
        }
@@ -575,7 +520,7 @@ func TestStudioQueryGalleryCount(t *testing.T) {

func verifyStudiosGalleryCount(t *testing.T, galleryCountCriterion models.IntCriterionInput) {
    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio
        studioFilter := models.StudioFilterType{
            GalleryCount: &galleryCountCriterion,
        }
@@ -606,11 +551,11 @@ func verifyStudiosGalleryCount(t *testing.T, galleryCountCriterion models.IntCri

func TestStudioStashIDs(t *testing.T) {
    if err := withTxn(func(ctx context.Context) error {
        qb := sqlite.StudioReaderWriter
        qb := db.Studio

        // create studio to test against
        const name = "TestStudioStashIDs"
        created, err := createStudio(ctx, sqlite.StudioReaderWriter, name, nil)
        created, err := createStudio(ctx, db.Studio, name, nil)
        if err != nil {
            return fmt.Errorf("Error creating studio: %s", err.Error())
        }
@@ -688,7 +633,7 @@ func TestStudioQueryRating(t *testing.T) {
func verifyStudioQuery(t *testing.T, filter models.StudioFilterType, verifyFn func(ctx context.Context, s *models.Studio)) {
    withTxn(func(ctx context.Context) error {
        t.Helper()
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio

        studios := queryStudio(ctx, t, sqb, &filter, nil)

@@ -705,7 +650,7 @@ func verifyStudioQuery(t *testing.T, filter models.StudioFilterType, verifyFn fu

func verifyStudiosRating(t *testing.T, ratingCriterion models.IntCriterionInput) {
    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio
        studioFilter := models.StudioFilterType{
            Rating: &ratingCriterion,
        }
@@ -726,7 +671,7 @@ func verifyStudiosRating(t *testing.T, ratingCriterion models.IntCriterionInput)

func TestStudioQueryIsMissingRating(t *testing.T) {
    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio
        isMissing := "rating"
        studioFilter := models.StudioFilterType{
            IsMissing: &isMissing,
@@ -802,7 +747,7 @@ func TestStudioQueryAlias(t *testing.T) {

    verifyFn := func(ctx context.Context, studio *models.Studio) {
        t.Helper()
        aliases, err := sqlite.StudioReaderWriter.GetAliases(ctx, studio.ID)
        aliases, err := db.Studio.GetAliases(ctx, studio.ID)
        if err != nil {
            t.Errorf("Error querying studios: %s", err.Error())
        }
@@ -837,7 +782,7 @@ func TestStudioQueryAlias(t *testing.T) {

func TestStudioUpdateAlias(t *testing.T) {
    if err := withTxn(func(ctx context.Context) error {
        qb := sqlite.StudioReaderWriter
        qb := db.Studio

        // create studio to test against
        const name = "TestStudioUpdateAlias"
@@ -934,7 +879,7 @@ func TestStudioQueryFast(t *testing.T) {
    }

    withTxn(func(ctx context.Context) error {
        sqb := sqlite.StudioReaderWriter
        sqb := db.Studio
        for _, f := range filters {
            for _, ff := range findFilters {
                _, _, err := sqb.Query(ctx, &f, &ff)

@@ -234,3 +234,10 @@ var (
        idColumn: goqu.T(movieTable).Col(idColumn),
    }
)

var (
    blobTableMgr = &table{
        table:    goqu.T(blobTable),
        idColumn: goqu.T(blobTable).Col(blobChecksumColumn),
    }
)

@@ -13,20 +13,31 @@ import (
    "github.com/stashapp/stash/pkg/sliceutil/intslice"
)

const tagTable = "tags"
const tagIDColumn = "tag_id"
const tagAliasesTable = "tag_aliases"
const tagAliasColumn = "alias"
const (
    tagTable        = "tags"
    tagIDColumn     = "tag_id"
    tagAliasesTable = "tag_aliases"
    tagAliasColumn  = "alias"

    tagImageBlobColumn = "image_blob"
)

type tagQueryBuilder struct {
    repository
    blobJoinQueryBuilder
}

var TagReaderWriter = &tagQueryBuilder{
    repository{
        tableName: tagTable,
        idColumn:  idColumn,
    },
func NewTagReaderWriter(blobStore *BlobStore) *tagQueryBuilder {
    return &tagQueryBuilder{
        repository{
            tableName: tagTable,
            idColumn:  idColumn,
        },
        blobJoinQueryBuilder{
            blobStore: blobStore,
            joinTable: tagTable,
        },
    }
}

func (qb *tagQueryBuilder) Create(ctx context.Context, newObject models.Tag) (*models.Tag, error) {
@@ -57,16 +68,8 @@ func (qb *tagQueryBuilder) UpdateFull(ctx context.Context, updatedObject models.
}

func (qb *tagQueryBuilder) Destroy(ctx context.Context, id int) error {
    // TODO - add delete cascade to foreign key
    // delete tag from scenes and markers first
    _, err := qb.tx.Exec(ctx, "DELETE FROM scenes_tags WHERE tag_id = ?", id)
    if err != nil {
        return err
    }

    // TODO - add delete cascade to foreign key
    _, err = qb.tx.Exec(ctx, "DELETE FROM scene_markers_tags WHERE tag_id = ?", id)
    if err != nil {
    // must handle image checksums manually
    if err := qb.destroyImage(ctx, id); err != nil {
        return err
    }

@@ -407,8 +410,7 @@ func tagIsMissingCriterionHandler(qb *tagQueryBuilder, isMissing *string) criter
    if isMissing != nil && *isMissing != "" {
        switch *isMissing {
        case "image":
            qb.imageRepository().join(f, "", "tags.id")
            f.addWhere("tags_image.tag_id IS NULL")
            f.addWhere("tags.image_blob IS NULL")
        default:
            f.addWhere("(tags." + *isMissing + " IS NULL OR TRIM(tags." + *isMissing + ") = '')")
        }
@@ -642,31 +644,16 @@ func (qb *tagQueryBuilder) queryTags(ctx context.Context, query string, args []i
    return []*models.Tag(ret), nil
}

func (qb *tagQueryBuilder) imageRepository() *imageRepository {
    return &imageRepository{
        repository: repository{
            tx:        qb.tx,
            tableName: "tags_image",
            idColumn:  tagIDColumn,
        },
        imageColumn: "image",
    }
}

func (qb *tagQueryBuilder) GetImage(ctx context.Context, tagID int) ([]byte, error) {
    return qb.imageRepository().get(ctx, tagID)
}

func (qb *tagQueryBuilder) HasImage(ctx context.Context, tagID int) (bool, error) {
    return qb.imageRepository().exists(ctx, tagID)
    return qb.blobJoinQueryBuilder.GetImage(ctx, tagID, tagImageBlobColumn)
}

func (qb *tagQueryBuilder) UpdateImage(ctx context.Context, tagID int, image []byte) error {
    return qb.imageRepository().replace(ctx, tagID, image)
    return qb.blobJoinQueryBuilder.UpdateImage(ctx, tagID, tagImageBlobColumn, image)
}

func (qb *tagQueryBuilder) DestroyImage(ctx context.Context, tagID int) error {
    return qb.imageRepository().destroy(ctx, []int{tagID})
func (qb *tagQueryBuilder) destroyImage(ctx context.Context, tagID int) error {
    return qb.blobJoinQueryBuilder.DestroyImage(ctx, tagID, tagImageBlobColumn)
}

func (qb *tagQueryBuilder) aliasRepository() *stringRepository {

@@ -19,7 +19,7 @@ import (

 func TestMarkerFindBySceneMarkerID(t *testing.T) {
 	withTxn(func(ctx context.Context) error {
-		tqb := sqlite.TagReaderWriter
+		tqb := db.Tag

 		markerID := markerIDs[markerIdxWithTag]

@@ -46,7 +46,7 @@ func TestMarkerFindBySceneMarkerID(t *testing.T) {

 func TestTagFindByName(t *testing.T) {
 	withTxn(func(ctx context.Context) error {
-		tqb := sqlite.TagReaderWriter
+		tqb := db.Tag

 		name := tagNames[tagIdxWithScene] // find a tag by name

@@ -82,7 +82,7 @@ func TestTagQueryIgnoreAutoTag(t *testing.T) {
 		IgnoreAutoTag: &ignoreAutoTag,
 	}

-	sqb := sqlite.TagReaderWriter
+	sqb := db.Tag

 	tags := queryTags(ctx, t, sqb, &tagFilter, nil)

@@ -97,7 +97,7 @@

 func TestTagQueryForAutoTag(t *testing.T) {
 	withTxn(func(ctx context.Context) error {
-		tqb := sqlite.TagReaderWriter
+		tqb := db.Tag

 		name := tagNames[tagIdx1WithScene] // find a tag by name

@@ -131,7 +131,7 @@ func TestTagFindByNames(t *testing.T) {
 	var names []string

 	withTxn(func(ctx context.Context) error {
-		tqb := sqlite.TagReaderWriter
+		tqb := db.Tag

 		names = append(names, tagNames[tagIdxWithScene]) // find tags by names

@@ -176,7 +176,7 @@

 func TestTagQuerySort(t *testing.T) {
 	withTxn(func(ctx context.Context) error {
-		sqb := sqlite.TagReaderWriter
+		sqb := db.Tag

 		sortBy := "scenes_count"
 		dir := models.SortDirectionEnumDesc

@@ -253,7 +253,7 @@ func TestTagQueryAlias(t *testing.T) {
 	}

 	verifyFn := func(ctx context.Context, tag *models.Tag) {
-		aliases, err := sqlite.TagReaderWriter.GetAliases(ctx, tag.ID)
+		aliases, err := db.Tag.GetAliases(ctx, tag.ID)
 		if err != nil {
 			t.Errorf("Error querying tags: %s", err.Error())
 		}

@@ -288,7 +288,7 @@

 func verifyTagQuery(t *testing.T, tagFilter *models.TagFilterType, findFilter *models.FindFilterType, verifyFn func(ctx context.Context, t *models.Tag)) {
 	withTxn(func(ctx context.Context) error {
-		sqb := sqlite.TagReaderWriter
+		sqb := db.Tag

 		tags := queryTags(ctx, t, sqb, tagFilter, findFilter)

@@ -312,7 +312,7 @@ func queryTags(ctx context.Context, t *testing.T, qb models.TagReader, tagFilter

 func TestTagQueryIsMissingImage(t *testing.T) {
 	withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag
 		isMissing := "image"
 		tagFilter := models.TagFilterType{
 			IsMissing: &isMissing,

@@ -366,7 +366,7 @@ func TestTagQuerySceneCount(t *testing.T) {

 func verifyTagSceneCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) {
 	withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag
 		tagFilter := models.TagFilterType{
 			SceneCount: &sceneCountCriterion,
 		}

@@ -408,7 +408,7 @@ func TestTagQueryMarkerCount(t *testing.T) {

 func verifyTagMarkerCount(t *testing.T, markerCountCriterion models.IntCriterionInput) {
 	withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag
 		tagFilter := models.TagFilterType{
 			MarkerCount: &markerCountCriterion,
 		}

@@ -450,7 +450,7 @@ func TestTagQueryImageCount(t *testing.T) {

 func verifyTagImageCount(t *testing.T, imageCountCriterion models.IntCriterionInput) {
 	withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag
 		tagFilter := models.TagFilterType{
 			ImageCount: &imageCountCriterion,
 		}

@@ -492,7 +492,7 @@ func TestTagQueryGalleryCount(t *testing.T) {

 func verifyTagGalleryCount(t *testing.T, imageCountCriterion models.IntCriterionInput) {
 	withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag
 		tagFilter := models.TagFilterType{
 			GalleryCount: &imageCountCriterion,
 		}

@@ -534,7 +534,7 @@ func TestTagQueryPerformerCount(t *testing.T) {

 func verifyTagPerformerCount(t *testing.T, imageCountCriterion models.IntCriterionInput) {
 	withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag
 		tagFilter := models.TagFilterType{
 			PerformerCount: &imageCountCriterion,
 		}

@@ -576,7 +576,7 @@ func TestTagQueryParentCount(t *testing.T) {

 func verifyTagParentCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) {
 	withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag
 		tagFilter := models.TagFilterType{
 			ParentCount: &sceneCountCriterion,
 		}

@@ -619,7 +619,7 @@ func TestTagQueryChildCount(t *testing.T) {

 func verifyTagChildCount(t *testing.T, sceneCountCriterion models.IntCriterionInput) {
 	withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag
 		tagFilter := models.TagFilterType{
 			ChildCount: &sceneCountCriterion,
 		}
@@ -644,7 +644,7 @@ func verifyTagChildCount(t *testing.T, sceneCountCriterionIn
 func TestTagQueryParent(t *testing.T) {
 	withTxn(func(ctx context.Context) error {
 		const nameField = "Name"
-		sqb := sqlite.TagReaderWriter
+		sqb := db.Tag
 		tagCriterion := models.HierarchicalMultiCriterionInput{
 			Value: []string{
 				strconv.Itoa(tagIDs[tagIdxWithChildTag]),

@@ -722,7 +722,7 @@ func TestTagQueryChild(t *testing.T) {
 	withTxn(func(ctx context.Context) error {
 		const nameField = "Name"

-		sqb := sqlite.TagReaderWriter
+		sqb := db.Tag
 		tagCriterion := models.HierarchicalMultiCriterionInput{
 			Value: []string{
 				strconv.Itoa(tagIDs[tagIdxWithParentTag]),

@@ -798,7 +798,7 @@

 func TestTagUpdateTagImage(t *testing.T) {
 	if err := withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag

 		// create tag to test against
 		const name = "TestTagUpdateTagImage"
@@ -810,64 +810,7 @@ func TestTagUpdateTagImage(t *testing.T) {
 			return fmt.Errorf("Error creating tag: %s", err.Error())
 		}

-		image := []byte("image")
-		err = qb.UpdateImage(ctx, created.ID, image)
-		if err != nil {
-			return fmt.Errorf("Error updating studio image: %s", err.Error())
-		}
-
-		// ensure image set
-		storedImage, err := qb.GetImage(ctx, created.ID)
-		if err != nil {
-			return fmt.Errorf("Error getting image: %s", err.Error())
-		}
-		assert.Equal(t, storedImage, image)
-
-		// set nil image
-		err = qb.UpdateImage(ctx, created.ID, nil)
-		if err == nil {
-			return fmt.Errorf("Expected error setting nil image")
-		}
-
-		return nil
-	}); err != nil {
-		t.Error(err.Error())
-	}
-}
-
-func TestTagDestroyTagImage(t *testing.T) {
-	if err := withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
-
-		// create performer to test against
-		const name = "TestTagDestroyTagImage"
-		tag := models.Tag{
-			Name: name,
-		}
-		created, err := qb.Create(ctx, tag)
-		if err != nil {
-			return fmt.Errorf("Error creating tag: %s", err.Error())
-		}
-
-		image := []byte("image")
-		err = qb.UpdateImage(ctx, created.ID, image)
-		if err != nil {
-			return fmt.Errorf("Error updating studio image: %s", err.Error())
-		}
-
-		err = qb.DestroyImage(ctx, created.ID)
-		if err != nil {
-			return fmt.Errorf("Error destroying studio image: %s", err.Error())
-		}
-
-		// image should be nil
-		storedImage, err := qb.GetImage(ctx, created.ID)
-		if err != nil {
-			return fmt.Errorf("Error getting image: %s", err.Error())
-		}
-		assert.Nil(t, storedImage)
-
-		return nil
+		return testUpdateImage(t, ctx, created.ID, qb.UpdateImage, qb.GetImage)
 	}); err != nil {
 		t.Error(err.Error())
 	}

@@ -875,7 +818,7 @@ func TestTagDestroyTagImage(t *testing.T) {

 func TestTagUpdateAlias(t *testing.T) {
 	if err := withTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag

 		// create tag to test against
 		const name = "TestTagUpdateAlias"

@@ -911,7 +854,7 @@ func TestTagMerge(t *testing.T) {

 	// merge tests - perform these in a transaction that we'll rollback
 	if err := withRollbackTxn(func(ctx context.Context) error {
-		qb := sqlite.TagReaderWriter
+		qb := db.Tag

 		// try merging into same tag
 		err := qb.Merge(ctx, []int{tagIDs[tagIdx1WithScene]}, tagIDs[tagIdx1WithScene])

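The per-entity image tests above collapse into a shared testUpdateImage helper; only its call shape is visible in this diff. Below is a plausible sketch of that helper, reconstructed from the deleted assertions — the real helper lives elsewhere in the test package (which already imports testing, context, fmt and testify's assert) and may differ:

func testUpdateImage(t *testing.T, ctx context.Context, id int,
	setFn func(ctx context.Context, id int, image []byte) error,
	getFn func(ctx context.Context, id int) ([]byte, error),
) error {
	// set an image and read it back through the supplied accessors
	image := []byte("image")
	if err := setFn(ctx, id, image); err != nil {
		return fmt.Errorf("error updating image: %w", err)
	}

	stored, err := getFn(ctx, id)
	if err != nil {
		return fmt.Errorf("error getting image: %w", err)
	}
	assert.Equal(t, image, stored)

	return nil
}
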
@@ -131,13 +131,13 @@ func (db *Database) TxnRepository() models.Repository {
 		Gallery:        db.Gallery,
 		GalleryChapter: GalleryChapterReaderWriter,
 		Image:          db.Image,
-		Movie:          MovieReaderWriter,
+		Movie:          db.Movie,
 		Performer:      db.Performer,
 		Scene:          db.Scene,
 		SceneMarker:    SceneMarkerReaderWriter,
 		ScrapedItem:    ScrapedItemReaderWriter,
-		Studio:         StudioReaderWriter,
-		Tag:            TagReaderWriter,
+		Studio:         db.Studio,
+		Tag:            db.Tag,
 		SavedFilter:    SavedFilterReaderWriter,
 	}
 }

@@ -11,9 +11,10 @@ const (
 )

 type hookManager struct {
-	postCommitHooks   []TxnFunc
-	postRollbackHooks []TxnFunc
-	postCompleteHooks []TxnFunc
+	preCommitHooks    []TxnFunc
+	postCommitHooks   []MustFunc
+	postRollbackHooks []MustFunc
+	postCompleteHooks []MustFunc
 }

 func (m *hookManager) register(ctx context.Context) context.Context {
@@ -28,39 +29,55 @@ func hookManagerCtx(ctx context.Context) *hookManager {
 	return m
 }

-func executeHooks(ctx context.Context, hooks []TxnFunc) {
+func executeHooks(ctx context.Context, hooks []TxnFunc) error {
+	// we need to return the first error
 	for _, h := range hooks {
-		// ignore errors
-		_ = h(ctx)
+		if err := h(ctx); err != nil {
+			return err
+		}
 	}
+
+	return nil
 }

+func executeMustHooks(ctx context.Context, hooks []MustFunc) {
+	for _, h := range hooks {
+		h(ctx)
+	}
+}
+
-func executePostCommitHooks(ctx context.Context, outerCtx context.Context) {
-	m := hookManagerCtx(ctx)
-	executeHooks(outerCtx, m.postCommitHooks)
+func (m *hookManager) executePostCommitHooks(ctx context.Context) {
+	executeMustHooks(ctx, m.postCommitHooks)
 }

-func executePostRollbackHooks(ctx context.Context, outerCtx context.Context) {
-	m := hookManagerCtx(ctx)
-	executeHooks(outerCtx, m.postRollbackHooks)
+func (m *hookManager) executePostRollbackHooks(ctx context.Context) {
+	executeMustHooks(ctx, m.postRollbackHooks)
 }

-func executePostCompleteHooks(ctx context.Context, outerCtx context.Context) {
-	m := hookManagerCtx(ctx)
-	executeHooks(outerCtx, m.postCompleteHooks)
+func (m *hookManager) executePreCommitHooks(ctx context.Context) error {
+	return executeHooks(ctx, m.preCommitHooks)
 }

-func AddPostCommitHook(ctx context.Context, hook TxnFunc) {
+func (m *hookManager) executePostCompleteHooks(ctx context.Context) {
+	executeMustHooks(ctx, m.postCompleteHooks)
+}
+
+func AddPreCommitHook(ctx context.Context, hook TxnFunc) {
 	m := hookManagerCtx(ctx)
+	m.preCommitHooks = append(m.preCommitHooks, hook)
+}
+
+func AddPostCommitHook(ctx context.Context, hook MustFunc) {
+	m := hookManagerCtx(ctx)
 	m.postCommitHooks = append(m.postCommitHooks, hook)
 }

-func AddPostRollbackHook(ctx context.Context, hook TxnFunc) {
+func AddPostRollbackHook(ctx context.Context, hook MustFunc) {
 	m := hookManagerCtx(ctx)
 	m.postRollbackHooks = append(m.postRollbackHooks, hook)
 }

-func AddPostCompleteHook(ctx context.Context, hook TxnFunc) {
+func AddPostCompleteHook(ctx context.Context, hook MustFunc) {
 	m := hookManagerCtx(ctx)
 	m.postCompleteHooks = append(m.postCompleteHooks, hook)
 }

@@ -17,13 +17,14 @@ type DatabaseProvider interface {
 	WithDatabase(ctx context.Context) (context.Context, error)
 }

 type DatabaseProviderManager interface {
 	DatabaseProvider
 	Manager
 }

 // TxnFunc is a function that is used in transaction hooks.
 // It should return an error if something went wrong.
 type TxnFunc func(ctx context.Context) error

+// MustFunc is a function that is used in transaction hooks.
+// It does not return an error.
+type MustFunc func(ctx context.Context)
+
 // WithTxn executes fn in a transaction. If fn returns an error then
 // the transaction is rolled back. Otherwise it is committed.
 // Transaction is exclusive. Only one thread may run a transaction
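The TxnFunc/MustFunc split encodes the new hook semantics: a pre-commit hook can still fail and veto the commit, while hooks that run after commit or rollback have nothing left to abort, so they cannot return an error and must handle any failure (for example by logging) themselves. A small registration sketch follows; the Add*Hook functions and hook signatures are from this diff, while the surrounding function is invented and assumes it sits in the same package with "context" and "log" imported:

// registerExampleHooks is hypothetical; ctx must come from a running transaction
func registerExampleHooks(ctx context.Context) {
	// TxnFunc: returning an error here aborts the commit
	AddPreCommitHook(ctx, func(ctx context.Context) error {
		log.Println("about to commit")
		return nil // a real hook could veto the commit by returning an error
	})

	// MustFunc: runs after a successful commit and cannot fail
	AddPostCommitHook(ctx, func(ctx context.Context) {
		log.Println("transaction committed")
	})
}
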
@@ -51,35 +52,44 @@ func WithReadTxn(ctx context.Context, m Manager, fn TxnFunc) error {
 	return withTxn(ctx, m, fn, exclusive, execComplete)
 }

-func withTxn(outerCtx context.Context, m Manager, fn TxnFunc, exclusive bool, execCompleteOnLocked bool) error {
-	ctx, err := begin(outerCtx, m, exclusive)
+func withTxn(ctx context.Context, m Manager, fn TxnFunc, exclusive bool, execCompleteOnLocked bool) error {
+	// post-hooks should be executed with the outside context
+	txnCtx, err := begin(ctx, m, exclusive)
 	if err != nil {
 		return err
 	}

+	hookMgr := hookManagerCtx(txnCtx)
+
 	defer func() {
 		if p := recover(); p != nil {
 			// a panic occurred, rollback and repanic
-			rollback(ctx, outerCtx, m)
+			rollback(txnCtx, m)
 			panic(p)
 		}

 		if err != nil {
 			// something went wrong, rollback
-			rollback(ctx, outerCtx, m)
+			rollback(txnCtx, m)
+
+			// execute post-hooks with outside context
+			hookMgr.executePostRollbackHooks(ctx)

 			if execCompleteOnLocked || !m.IsLocked(err) {
-				executePostCompleteHooks(ctx, outerCtx)
+				hookMgr.executePostCompleteHooks(ctx)
 			}
 		} else {
 			// all good, commit
-			err = commit(ctx, outerCtx, m)
-			executePostCompleteHooks(ctx, outerCtx)
+			err = commit(txnCtx, m)
+
+			// execute post-hooks with outside context
+			hookMgr.executePostCommitHooks(ctx)
+			hookMgr.executePostCompleteHooks(ctx)
 		}

 	}()

-	err = fn(ctx)
+	err = fn(txnCtx)
 	return err
 }

@@ -96,21 +106,23 @@ func begin(ctx context.Context, m Manager, exclusive bool) (context.Context, err
 	return ctx, nil
 }

-func commit(ctx context.Context, outerCtx context.Context, m Manager) error {
+func commit(ctx context.Context, m Manager) error {
+	hookMgr := hookManagerCtx(ctx)
+	if err := hookMgr.executePreCommitHooks(ctx); err != nil {
+		return err
+	}
+
 	if err := m.Commit(ctx); err != nil {
 		return err
 	}

-	executePostCommitHooks(ctx, outerCtx)
 	return nil
 }

-func rollback(ctx context.Context, outerCtx context.Context, m Manager) {
+func rollback(ctx context.Context, m Manager) {
 	if err := m.Rollback(ctx); err != nil {
 		return
 	}
-
-	executePostRollbackHooks(ctx, outerCtx)
 }

 // WithDatabase executes fn with the context provided by p.WithDatabase.

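With commit and rollback reworked as above, the hook lifecycle becomes: fn → pre-commit hooks (inside commit, may abort) → Commit → post-commit → post-complete on success, or Rollback → post-rollback → post-complete on failure, with post-hooks receiving the caller's outside context. A minimal usage sketch of how a blob store can exploit this to keep filesystem writes consistent with the database — the wrapper function and file handling are invented for illustration; only WithTxn and the Add*Hook helpers come from the diff, assumed here to live in stash's txn package:

package example

import (
	"context"
	"os"

	"github.com/stashapp/stash/pkg/txn"
)

// writeBlobInTxn is hypothetical; it stages a file write against the transaction.
func writeBlobInTxn(ctx context.Context, mgr txn.Manager, path string, data []byte) error {
	return txn.WithTxn(ctx, mgr, func(ctx context.Context) error {
		// stage the file write: pre-commit hooks run inside commit()
		// and can still abort the transaction by returning an error
		txn.AddPreCommitHook(ctx, func(ctx context.Context) error {
			return os.WriteFile(path, data, 0o600)
		})

		// undo the staged write if the transaction rolls back;
		// MustFunc hooks cannot fail, so the error is deliberately dropped
		txn.AddPostRollbackHook(ctx, func(ctx context.Context) {
			_ = os.Remove(path)
		})

		// database writes referencing the blob would go here
		return nil
	})
}
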
@@ -15,8 +15,11 @@ import {
   VideoPreviewInput,
   VideoPreviewSettingsInput,
 } from "./GeneratePreviewOptions";
+import { useIntl } from "react-intl";

 export const SettingsConfigurationPanel: React.FC = () => {
+  const intl = useIntl();
+
   const { general, loading, error, saveGeneral } =
     React.useContext(SettingStateContext);

@@ -94,20 +97,23 @@ export const SettingsConfigurationPanel: React.FC = () => {
     return GQL.HashAlgorithm.Md5;
   }

+  function blobStorageTypeToID(value: GQL.BlobsStorageType | undefined) {
+    switch (value) {
+      case GQL.BlobsStorageType.Database:
+        return "blobs_storage_type.database";
+      case GQL.BlobsStorageType.Filesystem:
+        return "blobs_storage_type.filesystem";
+    }
+
+    return "blobs_storage_type.database";
+  }
+
   if (error) return <h1>{error.message}</h1>;
   if (loading) return <LoadingIndicator />;

   return (
     <>
       <SettingSection headingID="config.application_paths.heading">
-        <StringSetting
-          id="database-path"
-          headingID="config.general.db_path_head"
-          subHeadingID="config.general.sqlite_location"
-          value={general.databasePath ?? undefined}
-          onChange={(v) => saveGeneral({ databasePath: v })}
-        />
-
         <StringSetting
           id="generated-path"
           headingID="config.general.generated_path_head"
@@ -165,6 +171,38 @@
         />
       </SettingSection>

+      <SettingSection headingID="config.general.database">
+        <StringSetting
+          id="database-path"
+          headingID="config.general.db_path_head"
+          subHeadingID="config.general.sqlite_location"
+          value={general.databasePath ?? undefined}
+          onChange={(v) => saveGeneral({ databasePath: v })}
+        />
+        <SelectSetting
+          id="blobs-storage"
+          headingID="config.general.blobs_storage.heading"
+          subHeadingID="config.general.blobs_storage.description"
+          value={general.blobsStorage ?? GQL.BlobsStorageType.Database}
+          onChange={(v) =>
+            saveGeneral({ blobsStorage: v as GQL.BlobsStorageType })
+          }
+        >
+          {Object.values(GQL.BlobsStorageType).map((q) => (
+            <option key={q} value={q}>
+              {intl.formatMessage({ id: blobStorageTypeToID(q) })}
+            </option>
+          ))}
+        </SelectSetting>
+        <StringSetting
+          id="blobs-path"
+          headingID="config.general.blobs_path.heading"
+          subHeadingID="config.general.blobs_path.description"
+          value={general.blobsPath ?? ""}
+          onChange={(v) => saveGeneral({ blobsPath: v })}
+        />
+      </SettingSection>
+
       <SettingSection headingID="config.general.hashing">
         <BooleanSetting
           id="calculate-md5-and-ohash"

@@ -8,6 +8,8 @@ import {
   mutateMetadataImport,
   mutateMetadataClean,
   mutateAnonymiseDatabase,
+  mutateMigrateSceneScreenshots,
+  mutateMigrateBlobs,
 } from "src/core/StashService";
 import { useToast } from "src/hooks/Toast";
 import downloadFile from "src/utils/download";
@@ -170,6 +172,17 @@
       dryRun: false,
     });

+  const [migrateBlobsOptions, setMigrateBlobsOptions] =
+    useState<GQL.MigrateBlobsInput>({
+      deleteOld: true,
+    });
+
+  const [migrateSceneScreenshotsOptions, setMigrateSceneScreenshotsOptions] =
+    useState<GQL.MigrateSceneScreenshotsInput>({
+      deleteFiles: false,
+      overwriteExisting: false,
+    });
+
   type DialogOpenState = typeof dialogOpen;

   function setDialogOpen(s: Partial<DialogOpenState>) {
@@ -256,6 +269,42 @@
     }
   }

+  async function onMigrateSceneScreenshots() {
+    try {
+      await mutateMigrateSceneScreenshots(migrateSceneScreenshotsOptions);
+      Toast.success({
+        content: intl.formatMessage(
+          { id: "config.tasks.added_job_to_queue" },
+          {
+            operation_name: intl.formatMessage({
+              id: "actions.migrate_scene_screenshots",
+            }),
+          }
+        ),
+      });
+    } catch (err) {
+      Toast.error(err);
+    }
+  }
+
+  async function onMigrateBlobs() {
+    try {
+      await mutateMigrateBlobs(migrateBlobsOptions);
+      Toast.success({
+        content: intl.formatMessage(
+          { id: "config.tasks.added_job_to_queue" },
+          {
+            operation_name: intl.formatMessage({
+              id: "actions.migrate_blobs",
+            }),
+          }
+        ),
+      });
+    } catch (err) {
+      Toast.error(err);
+    }
+  }
+
   async function onExport() {
     try {
       await mutateMetadataExport();
@@ -507,6 +556,69 @@
             <FormattedMessage id="actions.rename_gen_files" />
           </Button>
         </Setting>
+
+        <div className="setting-group">
+          <Setting
+            headingID="actions.migrate_blobs"
+            subHeadingID="config.tasks.migrate_blobs.description"
+          >
+            <Button
+              id="migrateBlobs"
+              variant="danger"
+              onClick={() => onMigrateBlobs()}
+            >
+              <FormattedMessage id="actions.migrate_blobs" />
+            </Button>
+          </Setting>
+
+          <BooleanSetting
+            id="migrate-blobs-delete-old"
+            checked={migrateBlobsOptions.deleteOld ?? false}
+            headingID="config.tasks.migrate_blobs.delete_old"
+            onChange={(v) =>
+              setMigrateBlobsOptions({ ...migrateBlobsOptions, deleteOld: v })
+            }
+          />
+        </div>
+
+        <div className="setting-group">
+          <Setting
+            headingID="actions.migrate_scene_screenshots"
+            subHeadingID="config.tasks.migrate_scene_screenshots.description"
+          >
+            <Button
+              id="migrateSceneScreenshots"
+              variant="danger"
+              onClick={() => onMigrateSceneScreenshots()}
+            >
+              <FormattedMessage id="actions.migrate_scene_screenshots" />
+            </Button>
+          </Setting>
+
+          <BooleanSetting
+            id="migrate-scene-screenshots-overwrite-existing"
+            checked={migrateSceneScreenshotsOptions.overwriteExisting ?? false}
+            headingID="config.tasks.migrate_scene_screenshots.overwrite_existing"
+            onChange={(v) =>
+              setMigrateSceneScreenshotsOptions({
+                ...migrateSceneScreenshotsOptions,
+                overwriteExisting: v,
+              })
+            }
+          />
+
+          <BooleanSetting
+            id="migrate-scene-screenshots-delete-files"
+            checked={migrateSceneScreenshotsOptions.deleteFiles ?? false}
+            headingID="config.tasks.migrate_scene_screenshots.delete_files"
+            onChange={(v) =>
+              setMigrateSceneScreenshotsOptions({
+                ...migrateSceneScreenshotsOptions,
+                deleteFiles: v,
+              })
+            }
+          />
+        </div>
       </SettingSection>
     </Form.Group>
   );

@@ -26,6 +26,12 @@ export const GenerateOptions: React.FC<IGenerateOptions> = ({

   return (
     <>
+      <BooleanSetting
+        id="covers-task"
+        headingID="dialogs.scene_gen.covers"
+        checked={options.covers ?? false}
+        onChange={(v) => setOptions({ covers: v })}
+      />
       <BooleanSetting
         id="preview-task"
         checked={options.previews ?? false}

@@ -90,6 +90,7 @@ export const LibraryTasks: React.FC = () => {

 function getDefaultGenerateOptions(): GQL.GenerateMetadataInput {
   return {
+    covers: true,
     sprites: true,
     phashes: true,
     previews: true,

@@ -12,6 +12,7 @@ export const ScanOptions: React.FC<IScanOptions> = ({
   setOptions: setOptionsState,
 }) => {
   const {
+    scanGenerateCovers,
     scanGeneratePreviews,
     scanGenerateImagePreviews,
     scanGenerateSprites,
@@ -25,6 +26,12 @@

   return (
     <>
+      <BooleanSetting
+        id="scan-generate-covers"
+        headingID="config.tasks.generate_video_covers_during_scan"
+        checked={scanGenerateCovers ?? false}
+        onChange={(v) => setOptions({ scanGenerateCovers: v })}
+      />
       <BooleanSetting
         id="scan-generate-previews"
         headingID="config.tasks.generate_video_previews_during_scan"

@@ -40,6 +40,7 @@ export const Setup: React.FC = () => {
   const [databaseFile, setDatabaseFile] = useState("");
   const [generatedLocation, setGeneratedLocation] = useState("");
   const [cacheLocation, setCacheLocation] = useState("");
+  const [blobsLocation, setBlobsLocation] = useState("blobs");
   const [loading, setLoading] = useState(false);
   const [setupError, setSetupError] = useState("");

@@ -49,6 +50,7 @@
   const [showGeneratedSelectDialog, setShowGeneratedSelectDialog] =
     useState(false);
   const [showCacheSelectDialog, setShowCacheSelectDialog] = useState(false);
+  const [showBlobsDialog, setShowBlobsDialog] = useState(false);

   const { data: systemStatus, loading: statusLoading } = useSystemStatus();

@@ -253,6 +255,22 @@
     return <FolderSelectDialog onClose={onGeneratedSelectClosed} />;
   }

+  function onBlobsClosed(d?: string) {
+    if (d) {
+      setBlobsLocation(d);
+    }
+
+    setShowBlobsDialog(false);
+  }
+
+  function maybeRenderBlobsSelectDialog() {
+    if (!showBlobsDialog) {
+      return;
+    }
+
+    return <FolderSelectDialog onClose={onBlobsClosed} />;
+  }
+
   function maybeRenderGenerated() {
     if (!configuration?.general.generatedPath) {
       return (
@@ -351,6 +369,56 @@
     }
   }

+  function maybeRenderBlobs() {
+    if (!configuration?.general.blobsPath) {
+      return (
+        <Form.Group id="blobs">
+          <h3>
+            <FormattedMessage id="setup.paths.where_can_stash_store_blobs" />
+          </h3>
+          <p>
+            <FormattedMessage
+              id="setup.paths.where_can_stash_store_blobs_description"
+              values={{
+                code: (chunks: string) => <code>{chunks}</code>,
+              }}
+            />
+          </p>
+          <p>
+            <FormattedMessage
+              id="setup.paths.where_can_stash_store_blobs_description_addendum"
+              values={{
+                code: (chunks: string) => <code>{chunks}</code>,
+                strong: (chunks: string) => <strong>{chunks}</strong>,
+              }}
+            />
+          </p>
+          <InputGroup>
+            <Form.Control
+              className="text-input"
+              value={blobsLocation}
+              placeholder={intl.formatMessage({
+                id: "setup.paths.path_to_blobs_directory_empty_for_database",
+              })}
+              onChange={(e: React.ChangeEvent<HTMLInputElement>) =>
+                setBlobsLocation(e.currentTarget.value)
+              }
+            />
+            <InputGroup.Append>
+              <Button
+                variant="secondary"
+                className="text-input"
+                onClick={() => setShowBlobsDialog(true)}
+              >
+                <Icon icon={faEllipsisH} />
+              </Button>
+            </InputGroup.Append>
+          </InputGroup>
+        </Form.Group>
+      );
+    }
+  }
+
   function renderSetPaths() {
     return (
       <>
@@ -410,6 +478,7 @@
         </Form.Group>
         {maybeRenderGenerated()}
         {maybeRenderCache()}
+        {maybeRenderBlobs()}
       </section>
       <section className="mt-5">
         <div className="d-flex justify-content-center">
@@ -474,6 +543,7 @@
         databaseFile,
         generatedLocation,
         cacheLocation,
+        blobsLocation,
         stashes,
       });
       // Set lastNoteSeen to hide release notes dialog
@@ -556,6 +626,18 @@
               })}
           </code>
         </dd>
+        <dt>
+          <FormattedMessage id="setup.confirm.blobs_directory" />
+        </dt>
+        <dd>
+          <code>
+            {blobsLocation !== ""
+              ? blobsLocation
+              : intl.formatMessage({
+                  id: "setup.confirm.default_blobs_location",
+                })}
+          </code>
+        </dd>
       </dl>
     </section>
     <section className="mt-5">
@@ -739,6 +821,7 @@
     <Container>
       {maybeRenderGeneratedSelectDialog()}
       {maybeRenderCacheSelectDialog()}
+      {maybeRenderBlobsSelectDialog()}
       <h1 className="text-center">
         <FormattedMessage id="setup.stash_setup_wizard" />
       </h1>

@@ -1215,6 +1215,20 @@ export const mutateMigrateHashNaming = () =>
     mutation: GQL.MigrateHashNamingDocument,
   });

+export const mutateMigrateSceneScreenshots = (
+  input: GQL.MigrateSceneScreenshotsInput
+) =>
+  client.mutate<GQL.MigrateSceneScreenshotsMutation>({
+    mutation: GQL.MigrateSceneScreenshotsDocument,
+    variables: { input },
+  });
+
+export const mutateMigrateBlobs = (input: GQL.MigrateBlobsInput) =>
+  client.mutate<GQL.MigrateBlobsMutation>({
+    mutation: GQL.MigrateBlobsDocument,
+    variables: { input },
+  });
+
 export const mutateMetadataExport = () =>
   client.mutate<GQL.MetadataExportMutation>({
     mutation: GQL.MetadataExportDocument,

Some files were not shown because too many files have changed in this diff.