diff --git a/graphql/schema/types/metadata.graphql b/graphql/schema/types/metadata.graphql index d8b94d477..96784ee9d 100644 --- a/graphql/schema/types/metadata.graphql +++ b/graphql/schema/types/metadata.graphql @@ -71,10 +71,19 @@ input ScanMetaDataFilterInput { input ScanMetadataInput { paths: [String!] + # useFileMetadata is deprecated with the new file management system + # if this functionality is desired, then we can make a built in scraper instead. + """Set name, date, details from metadata (if present)""" - useFileMetadata: Boolean + useFileMetadata: Boolean @deprecated(reason: "Not implemented") + + # stripFileExtension is deprecated since we no longer set the title from the + # filename - it is automatically returned if the object has no title. If this + # functionality is desired, then we could make this an option to not include + # the extension in the auto-generated title. + """Strip file extension from title""" - stripFileExtension: Boolean + stripFileExtension: Boolean @deprecated(reason: "Not implemented") """Generate previews during scan""" scanGeneratePreviews: Boolean """Generate image previews during scan""" diff --git a/internal/api/resolver_mutation_stash_box.go b/internal/api/resolver_mutation_stash_box.go index 95e300f9d..e44c994d2 100644 --- a/internal/api/resolver_mutation_stash_box.go +++ b/internal/api/resolver_mutation_stash_box.go @@ -59,7 +59,7 @@ func (r *mutationResolver) SubmitStashBoxSceneDraft(ctx context.Context, input S } filepath := manager.GetInstance().Paths.Scene.GetScreenshotPath(scene.GetHash(config.GetInstance().GetVideoFileNamingAlgorithm())) - res, err = client.SubmitSceneDraft(ctx, id, boxes[input.StashBoxIndex].Endpoint, filepath) + res, err = client.SubmitSceneDraft(ctx, scene, boxes[input.StashBoxIndex].Endpoint, filepath) return err }) diff --git a/internal/api/routes_image.go b/internal/api/routes_image.go index 93a546a3c..667d03eaf 100644 --- a/internal/api/routes_image.go +++ b/internal/api/routes_image.go @@ 
-66,7 +66,7 @@ func (rs imageRoutes) Thumbnail(w http.ResponseWriter, r *http.Request) { if err != nil { // don't log for unsupported image format if !errors.Is(err, image.ErrNotSupportedForThumbnail) { - logger.Errorf("error generating thumbnail for image: %s", err.Error()) + logger.Errorf("error generating thumbnail for %s: %v", f.Path, err) var exitErr *exec.ExitError if errors.As(err, &exitErr) { diff --git a/internal/manager/config/tasks.go b/internal/manager/config/tasks.go index e455ea3e1..2f69c8a50 100644 --- a/internal/manager/config/tasks.go +++ b/internal/manager/config/tasks.go @@ -2,8 +2,10 @@ package config type ScanMetadataOptions struct { // Set name, date, details from metadata (if present) + // Deprecated: not implemented UseFileMetadata bool `json:"useFileMetadata"` // Strip file extension from title + // Deprecated: not implemented StripFileExtension bool `json:"stripFileExtension"` // Generate previews during scan ScanGeneratePreviews bool `json:"scanGeneratePreviews"` diff --git a/internal/manager/manager.go b/internal/manager/manager.go index f58b9ec05..fb11b7a6c 100644 --- a/internal/manager/manager.go +++ b/internal/manager/manager.go @@ -197,6 +197,8 @@ func initialize() error { Repository: db.Gallery, ImageFinder: db.Image, ImageService: instance.ImageService, + File: db.File, + Folder: db.Folder, } instance.JobManager = initJobManager() @@ -265,15 +267,15 @@ func initialize() error { return nil } -func videoFileFilter(f file.File) bool { +func videoFileFilter(ctx context.Context, f file.File) bool { return isVideo(f.Base().Basename) } -func imageFileFilter(f file.File) bool { +func imageFileFilter(ctx context.Context, f file.File) bool { return isImage(f.Base().Basename) } -func galleryFileFilter(f file.File) bool { +func galleryFileFilter(ctx context.Context, f file.File) bool { return isZip(f.Base().Basename) } diff --git a/internal/manager/repository.go b/internal/manager/repository.go index 1f78e4209..0be022cac 100644 --- 
a/internal/manager/repository.go +++ b/internal/manager/repository.go @@ -86,6 +86,7 @@ type SceneService interface { type ImageService interface { Destroy(ctx context.Context, image *models.Image, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) error + DestroyZipImages(ctx context.Context, zipFile file.File, fileDeleter *image.FileDeleter, deleteGenerated bool) ([]*models.Image, error) } type GalleryService interface { diff --git a/internal/manager/task_clean.go b/internal/manager/task_clean.go index f9f9fc6e1..c19a57e27 100644 --- a/internal/manager/task_clean.go +++ b/internal/manager/task_clean.go @@ -60,11 +60,9 @@ type cleanFilter struct { func newCleanFilter(c *config.Instance) *cleanFilter { return &cleanFilter{ scanFilter: scanFilter{ + extensionConfig: newExtensionConfig(c), stashPaths: c.GetStashPaths(), generatedPath: c.GetGeneratedPath(), - vidExt: c.GetVideoExtensions(), - imgExt: c.GetImageExtensions(), - zipExt: c.GetGalleryExtensions(), videoExcludeRegex: generateRegexps(c.GetExcludes()), imageExcludeRegex: generateRegexps(c.GetImageExcludes()), }, diff --git a/internal/manager/task_scan.go b/internal/manager/task_scan.go index 97b1d4922..6ea509e5d 100644 --- a/internal/manager/task_scan.go +++ b/internal/manager/task_scan.go @@ -51,11 +51,17 @@ func (j *ScanJob) Execute(ctx context.Context, progress *job.Progress) { const taskQueueSize = 200000 taskQueue := job.NewTaskQueue(ctx, progress, taskQueueSize, instance.Config.GetParallelTasksWithAutoDetection()) + var minModTime time.Time + if j.input.Filter != nil && j.input.Filter.MinModTime != nil { + minModTime = *j.input.Filter.MinModTime + } + j.scanner.Scan(ctx, getScanHandlers(j.input, taskQueue, progress), file.ScanOptions{ - Paths: paths, - ScanFilters: []file.PathFilter{newScanFilter(instance.Config)}, - ZipFileExtensions: instance.Config.GetGalleryExtensions(), - ParallelTasks: instance.Config.GetParallelTasksWithAutoDetection(), + Paths: paths, + ScanFilters: 
[]file.PathFilter{newScanFilter(instance.Config, minModTime)}, + ZipFileExtensions: instance.Config.GetGalleryExtensions(), + ParallelTasks: instance.Config.GetParallelTasksWithAutoDetection(), + HandlerRequiredFilters: []file.Filter{newHandlerRequiredFilter(instance.Config)}, }, progress) taskQueue.Close() @@ -71,25 +77,92 @@ func (j *ScanJob) Execute(ctx context.Context, progress *job.Progress) { j.subscriptions.notify() } -type scanFilter struct { - stashPaths []*config.StashConfig - generatedPath string - vidExt []string - imgExt []string - zipExt []string - videoExcludeRegex []*regexp.Regexp - imageExcludeRegex []*regexp.Regexp +type extensionConfig struct { + vidExt []string + imgExt []string + zipExt []string } -func newScanFilter(c *config.Instance) *scanFilter { +func newExtensionConfig(c *config.Instance) extensionConfig { + return extensionConfig{ + vidExt: c.GetVideoExtensions(), + imgExt: c.GetImageExtensions(), + zipExt: c.GetGalleryExtensions(), + } +} + +type fileCounter interface { + CountByFileID(ctx context.Context, fileID file.ID) (int, error) +} + +// handlerRequiredFilter returns true if a File's handler needs to be executed despite the file not being updated. 
+type handlerRequiredFilter struct { + extensionConfig + SceneFinder fileCounter + ImageFinder fileCounter + GalleryFinder fileCounter +} + +func newHandlerRequiredFilter(c *config.Instance) *handlerRequiredFilter { + db := instance.Database + + return &handlerRequiredFilter{ + extensionConfig: newExtensionConfig(c), + SceneFinder: db.Scene, + ImageFinder: db.Image, + GalleryFinder: db.Gallery, + } +} + +func (f *handlerRequiredFilter) Accept(ctx context.Context, ff file.File) bool { + path := ff.Base().Path + isVideoFile := fsutil.MatchExtension(path, f.vidExt) + isImageFile := fsutil.MatchExtension(path, f.imgExt) + isZipFile := fsutil.MatchExtension(path, f.zipExt) + + var counter fileCounter + + switch { + case isVideoFile: + // return true if there are no scenes associated + counter = f.SceneFinder + case isImageFile: + counter = f.ImageFinder + case isZipFile: + counter = f.GalleryFinder + } + + if counter == nil { + return false + } + + n, err := counter.CountByFileID(ctx, ff.Base().ID) + if err != nil { + // just ignore + return false + } + + // execute handler if there are no related objects + return n == 0 +} + +type scanFilter struct { + extensionConfig + stashPaths []*config.StashConfig + generatedPath string + videoExcludeRegex []*regexp.Regexp + imageExcludeRegex []*regexp.Regexp + minModTime time.Time +} + +func newScanFilter(c *config.Instance, minModTime time.Time) *scanFilter { return &scanFilter{ + extensionConfig: newExtensionConfig(c), stashPaths: c.GetStashPaths(), generatedPath: c.GetGeneratedPath(), - vidExt: c.GetVideoExtensions(), - imgExt: c.GetImageExtensions(), - zipExt: c.GetGalleryExtensions(), videoExcludeRegex: generateRegexps(c.GetExcludes()), imageExcludeRegex: generateRegexps(c.GetImageExcludes()), + minModTime: minModTime, } } @@ -98,6 +171,11 @@ func (f *scanFilter) Accept(ctx context.Context, path string, info fs.FileInfo) return false } + // exit early on cutoff + if info.Mode().IsRegular() && 
info.ModTime().Before(f.minModTime) { + return false + } + isVideoFile := fsutil.MatchExtension(path, f.vidExt) isImageFile := fsutil.MatchExtension(path, f.imgExt) isZipFile := fsutil.MatchExtension(path, f.zipExt) diff --git a/pkg/file/delete.go b/pkg/file/delete.go index 52abe7271..b667aca6f 100644 --- a/pkg/file/delete.go +++ b/pkg/file/delete.go @@ -180,6 +180,43 @@ func Destroy(ctx context.Context, destroyer Destroyer, f File, fileDeleter *Dele return err } + // don't delete files in zip files + if deleteFile && f.Base().ZipFileID == nil { + if err := fileDeleter.Files([]string{f.Base().Path}); err != nil { + return err + } + } + + return nil +} + +type FolderGetterDestroyer interface { + FolderGetter + FolderDestroyer +} + +type ZipDestroyer struct { + FileDestroyer Destroyer + FolderDestroyer FolderGetterDestroyer +} + +func (d *ZipDestroyer) DestroyZip(ctx context.Context, f File, fileDeleter *Deleter, deleteFile bool) error { + // destroy contained folders + folders, err := d.FolderDestroyer.FindByZipFileID(ctx, f.Base().ID) + if err != nil { + return err + } + + for _, ff := range folders { + if err := d.FolderDestroyer.Destroy(ctx, ff.ID); err != nil { + return err + } + } + + if err := d.FileDestroyer.Destroy(ctx, f.Base().ID); err != nil { + return err + } + if deleteFile { if err := fileDeleter.Files([]string{f.Base().Path}); err != nil { return err diff --git a/pkg/file/file.go b/pkg/file/file.go index 425057d33..d618a2e5f 100644 --- a/pkg/file/file.go +++ b/pkg/file/file.go @@ -198,7 +198,7 @@ type FilteredDecorator struct { // Decorate runs the decorator if the filter accepts the file. 
func (d *FilteredDecorator) Decorate(ctx context.Context, fs FS, f File) (File, error) { - if d.Accept(f) { + if d.Accept(ctx, f) { return d.Decorator.Decorate(ctx, fs, f) } return f, nil diff --git a/pkg/file/handler.go b/pkg/file/handler.go index c06ff2477..b51b2a76a 100644 --- a/pkg/file/handler.go +++ b/pkg/file/handler.go @@ -18,13 +18,13 @@ func (pff PathFilterFunc) Accept(path string) bool { // Filter provides a filter function for Files. type Filter interface { - Accept(f File) bool + Accept(ctx context.Context, f File) bool } -type FilterFunc func(f File) bool +type FilterFunc func(ctx context.Context, f File) bool -func (ff FilterFunc) Accept(f File) bool { - return ff(f) +func (ff FilterFunc) Accept(ctx context.Context, f File) bool { + return ff(ctx, f) } // Handler provides a handler for Files. @@ -40,7 +40,7 @@ type FilteredHandler struct { // Handle runs the handler if the filter accepts the file. func (h *FilteredHandler) Handle(ctx context.Context, f File) error { - if h.Accept(f) { + if h.Accept(ctx, f) { return h.Handler.Handle(ctx, f) } return nil diff --git a/pkg/file/scan.go b/pkg/file/scan.go index 3b3e125ef..1ef36c33e 100644 --- a/pkg/file/scan.go +++ b/pkg/file/scan.go @@ -100,6 +100,9 @@ type ScanOptions struct { // ScanFilters are used to determine if a file should be scanned. 
ScanFilters []PathFilter + // HandlerRequiredFilters are used to determine if an unchanged file needs to be handled + HandlerRequiredFilters []Filter + ParallelTasks int } @@ -616,8 +619,15 @@ func (s *scanJob) onNewFile(ctx context.Context, f scanFile) (File, error) { baseFile.SetFingerprints(fp) + file, err := s.fireDecorators(ctx, f.fs, baseFile) + if err != nil { + s.incrementProgress() + return nil, err + } + // determine if the file is renamed from an existing file in the store - renamed, err := s.handleRename(ctx, baseFile, fp) + // do this after decoration so that missing fields can be populated + renamed, err := s.handleRename(ctx, file, fp) if err != nil { s.incrementProgress() return nil, err @@ -627,15 +637,8 @@ func (s *scanJob) onNewFile(ctx context.Context, f scanFile) (File, error) { return renamed, nil } - file, err := s.fireDecorators(ctx, f.fs, baseFile) - if err != nil { - s.incrementProgress() - return nil, err - } - // if not renamed, queue file for creation if err := s.queueDBOperation(ctx, path, func(ctx context.Context) error { - logger.Infof("%s doesn't exist. Creating new file entry...", path) if err := s.Repository.Create(ctx, file); err != nil { return fmt.Errorf("creating file %q: %w", path, err) } @@ -733,7 +736,7 @@ func (s *scanJob) getFileFS(f *BaseFile) (FS, error) { return fs.OpenZip(zipPath) } -func (s *scanJob) handleRename(ctx context.Context, f *BaseFile, fp []Fingerprint) (File, error) { +func (s *scanJob) handleRename(ctx context.Context, f File, fp []Fingerprint) (File, error) { var others []File for _, tfp := range fp { @@ -761,36 +764,48 @@ func (s *scanJob) handleRename(ctx context.Context, f *BaseFile, fp []Fingerprin } n := len(missing) - switch { - case n == 1: - // assume does not exist, update existing file - other := missing[0] - otherBase := other.Base() - - logger.Infof("%s moved to %s. 
Updating path...", otherBase.Path, f.Path) - f.ID = otherBase.ID - f.CreatedAt = otherBase.CreatedAt - f.Fingerprints = otherBase.Fingerprints - *otherBase = *f - - if err := s.queueDBOperation(ctx, f.Path, func(ctx context.Context) error { - if err := s.Repository.Update(ctx, other); err != nil { - return fmt.Errorf("updating file for rename %q: %w", f.Path, err) - } - - return nil - }); err != nil { - return nil, err - } - - return other, nil - case n > 1: - // multiple candidates - // TODO - mark all as missing and just create a new file + if n == 0 { + // no missing files, not a rename return nil, nil } - return nil, nil + // assume does not exist, update existing file + // it's possible that there may be multiple missing files. + // just use the first one to rename. + other := missing[0] + otherBase := other.Base() + + fBase := f.Base() + + logger.Infof("%s moved to %s. Updating path...", otherBase.Path, fBase.Path) + fBase.ID = otherBase.ID + fBase.CreatedAt = otherBase.CreatedAt + fBase.Fingerprints = otherBase.Fingerprints + + if err := s.queueDBOperation(ctx, fBase.Path, func(ctx context.Context) error { + if err := s.Repository.Update(ctx, f); err != nil { + return fmt.Errorf("updating file for rename %q: %w", fBase.Path, err) + } + + return nil + }); err != nil { + return nil, err + } + + return f, nil +} + +func (s *scanJob) isHandlerRequired(ctx context.Context, f File) bool { + accept := len(s.options.HandlerRequiredFilters) == 0 + for _, filter := range s.options.HandlerRequiredFilters { + // accept if any filter accepts the file + if filter.Accept(ctx, f) { + accept = true + break + } + } + + return accept } // returns a file only if it was updated @@ -802,7 +817,31 @@ func (s *scanJob) onExistingFile(ctx context.Context, f scanFile, existing File) updated := !fileModTime.Equal(base.ModTime) if !updated { - s.incrementProgress() + handlerRequired := false + if err := s.withDB(ctx, func(ctx context.Context) error { + // check if the handler needs to 
be run + handlerRequired = s.isHandlerRequired(ctx, existing) + return nil + }); err != nil { + return nil, err + } + + if !handlerRequired { + s.incrementProgress() + return nil, nil + } + + if err := s.queueDBOperation(ctx, path, func(ctx context.Context) error { + if err := s.fireHandlers(ctx, existing); err != nil { + return err + } + + s.incrementProgress() + return nil + }); err != nil { + return nil, err + } + return nil, nil } diff --git a/pkg/gallery/delete.go b/pkg/gallery/delete.go index ada123eed..e056da369 100644 --- a/pkg/gallery/delete.go +++ b/pkg/gallery/delete.go @@ -3,6 +3,7 @@ package gallery import ( "context" + "github.com/stashapp/stash/pkg/file" "github.com/stashapp/stash/pkg/image" "github.com/stashapp/stash/pkg/models" ) @@ -10,12 +11,8 @@ import ( func (s *Service) Destroy(ctx context.Context, i *models.Gallery, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) ([]*models.Image, error) { var imgsDestroyed []*models.Image - // TODO - we currently destroy associated files so that they will be rescanned. - // A better way would be to keep the file entries in the database, and recreate - // associated objects during the scan process if there are none already. 
- // if this is a zip-based gallery, delete the images as well first - zipImgsDestroyed, err := s.destroyZipImages(ctx, i, fileDeleter, deleteGenerated, deleteFile) + zipImgsDestroyed, err := s.destroyZipFileImages(ctx, i, fileDeleter, deleteGenerated, deleteFile) if err != nil { return nil, err } @@ -42,9 +39,14 @@ func (s *Service) Destroy(ctx context.Context, i *models.Gallery, fileDeleter *i return imgsDestroyed, nil } -func (s *Service) destroyZipImages(ctx context.Context, i *models.Gallery, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) ([]*models.Image, error) { +func (s *Service) destroyZipFileImages(ctx context.Context, i *models.Gallery, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) ([]*models.Image, error) { var imgsDestroyed []*models.Image + destroyer := &file.ZipDestroyer{ + FileDestroyer: s.File, + FolderDestroyer: s.Folder, + } + // for zip-based galleries, delete the images as well first for _, f := range i.Files { // only do this where there are no other galleries related to the file @@ -58,21 +60,15 @@ func (s *Service) destroyZipImages(ctx context.Context, i *models.Gallery, fileD continue } - imgs, err := s.ImageFinder.FindByZipFileID(ctx, f.Base().ID) + thisDestroyed, err := s.ImageService.DestroyZipImages(ctx, f, fileDeleter, deleteGenerated) if err != nil { return nil, err } - for _, img := range imgs { - if err := s.ImageService.Destroy(ctx, img, fileDeleter, deleteGenerated, false); err != nil { - return nil, err - } - - imgsDestroyed = append(imgsDestroyed, img) - } + imgsDestroyed = append(imgsDestroyed, thisDestroyed...) 
if deleteFile { - if err := fileDeleter.Files([]string{f.Base().Path}); err != nil { + if err := destroyer.DestroyZip(ctx, f, fileDeleter.Deleter, deleteFile); err != nil { return nil, err } } diff --git a/pkg/gallery/scan.go b/pkg/gallery/scan.go index 6dd334295..fccbeb756 100644 --- a/pkg/gallery/scan.go +++ b/pkg/gallery/scan.go @@ -64,8 +64,10 @@ func (h *ScanHandler) Handle(ctx context.Context, f file.File) error { UpdatedAt: now, } + logger.Infof("%s doesn't exist. Creating new gallery...", f.Base().Path) + if err := h.CreatorUpdater.Create(ctx, newGallery, []file.ID{baseFile.ID}); err != nil { - return fmt.Errorf("creating new image: %w", err) + return fmt.Errorf("creating new gallery: %w", err) } h.PluginCache.ExecutePostHooks(ctx, newGallery.ID, plugin.GalleryCreatePost, nil, nil) diff --git a/pkg/gallery/service.go b/pkg/gallery/service.go index 6b0f961da..4468bcc20 100644 --- a/pkg/gallery/service.go +++ b/pkg/gallery/service.go @@ -8,8 +8,12 @@ import ( "github.com/stashapp/stash/pkg/models" ) -type Repository interface { +type FinderByFile interface { FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Gallery, error) +} + +type Repository interface { + FinderByFile Destroy(ctx context.Context, id int) error } @@ -20,10 +24,13 @@ type ImageFinder interface { type ImageService interface { Destroy(ctx context.Context, i *models.Image, fileDeleter *image.FileDeleter, deleteGenerated, deleteFile bool) error + DestroyZipImages(ctx context.Context, zipFile file.File, fileDeleter *image.FileDeleter, deleteGenerated bool) ([]*models.Image, error) } type Service struct { Repository Repository ImageFinder ImageFinder ImageService ImageService + File file.Store + Folder file.FolderStore } diff --git a/pkg/image/delete.go b/pkg/image/delete.go index 447ffa578..7f5462de1 100644 --- a/pkg/image/delete.go +++ b/pkg/image/delete.go @@ -33,12 +33,37 @@ func (d *FileDeleter) MarkGeneratedFiles(image *models.Image) error { // Destroy destroys an image, 
optionally marking the file and generated files for deletion. func (s *Service) Destroy(ctx context.Context, i *models.Image, fileDeleter *FileDeleter, deleteGenerated, deleteFile bool) error { - // TODO - we currently destroy associated files so that they will be rescanned. - // A better way would be to keep the file entries in the database, and recreate - // associated objects during the scan process if there are none already. + return s.destroyImage(ctx, i, fileDeleter, deleteGenerated, deleteFile) } - if err := s.destroyFiles(ctx, i, fileDeleter, deleteFile); err != nil { - return err +// DestroyZipImages destroys all images in zip, optionally marking the files and generated files for deletion. +// Returns a slice of images that were destroyed. +func (s *Service) DestroyZipImages(ctx context.Context, zipFile file.File, fileDeleter *FileDeleter, deleteGenerated bool) ([]*models.Image, error) { + var imgsDestroyed []*models.Image + + imgs, err := s.Repository.FindByZipFileID(ctx, zipFile.Base().ID) + if err != nil { + return nil, err + } + + for _, img := range imgs { + const deleteFileInZip = false + if err := s.destroyImage(ctx, img, fileDeleter, deleteGenerated, deleteFileInZip); err != nil { + return nil, err + } + + imgsDestroyed = append(imgsDestroyed, img) + } + + return imgsDestroyed, nil +} + +// destroyImage destroys an image, optionally marking the file and generated files for deletion. 
+func (s *Service) destroyImage(ctx context.Context, i *models.Image, fileDeleter *FileDeleter, deleteGenerated, deleteFile bool) error { + if deleteFile { + if err := s.deleteFiles(ctx, i, fileDeleter); err != nil { + return err + } } if deleteGenerated { @@ -50,7 +75,8 @@ func (s *Service) Destroy(ctx context.Context, i *models.Image, fileDeleter *Fil return s.Repository.Destroy(ctx, i.ID) } -func (s *Service) destroyFiles(ctx context.Context, i *models.Image, fileDeleter *FileDeleter, deleteFile bool) error { +// deleteFiles deletes files for the image from the database and file system, if they are not in use by other images +func (s *Service) deleteFiles(ctx context.Context, i *models.Image, fileDeleter *FileDeleter) error { for _, f := range i.Files { // only delete files where there is no other associated image otherImages, err := s.Repository.FindByFileID(ctx, f.ID) @@ -64,7 +90,8 @@ func (s *Service) destroyFiles(ctx context.Context, i *models.Image, fileDeleter } // don't delete files in zip archives - if deleteFile && f.ZipFileID == nil { + const deleteFile = true + if f.ZipFileID == nil { if err := file.Destroy(ctx, s.File, f, fileDeleter.Deleter, deleteFile); err != nil { return err } diff --git a/pkg/image/scan.go b/pkg/image/scan.go index 8e6a95e89..ac30ae523 100644 --- a/pkg/image/scan.go +++ b/pkg/image/scan.go @@ -115,6 +115,8 @@ func (h *ScanHandler) Handle(ctx context.Context, f file.File) error { } } + logger.Infof("%s doesn't exist. 
Creating new image...", f.Base().Path) + if err := h.CreatorUpdater.Create(ctx, &models.ImageCreateInput{ Image: newImage, FileIDs: []file.ID{imageFile.ID}, diff --git a/pkg/image/service.go b/pkg/image/service.go index 5de330fa2..41cd076ef 100644 --- a/pkg/image/service.go +++ b/pkg/image/service.go @@ -9,6 +9,7 @@ import ( type FinderByFile interface { FindByFileID(ctx context.Context, fileID file.ID) ([]*models.Image, error) + FindByZipFileID(ctx context.Context, zipFileID file.ID) ([]*models.Image, error) } type Repository interface { diff --git a/pkg/scene/delete.go b/pkg/scene/delete.go index 42cd3b277..60156b912 100644 --- a/pkg/scene/delete.go +++ b/pkg/scene/delete.go @@ -140,12 +140,10 @@ func (s *Service) Destroy(ctx context.Context, scene *models.Scene, fileDeleter } } - // TODO - we currently destroy associated files so that they will be rescanned. - // A better way would be to keep the file entries in the database, and recreate - // associated objects during the scan process if there are none already. 
- - if err := s.destroyFiles(ctx, scene, fileDeleter, deleteFile); err != nil { - return err + if deleteFile { + if err := s.deleteFiles(ctx, scene, fileDeleter); err != nil { + return err + } } if deleteGenerated { @@ -161,7 +159,8 @@ func (s *Service) Destroy(ctx context.Context, scene *models.Scene, fileDeleter return nil } -func (s *Service) destroyFiles(ctx context.Context, scene *models.Scene, fileDeleter *FileDeleter, deleteFile bool) error { +// deleteFiles deletes files from the database and file system +func (s *Service) deleteFiles(ctx context.Context, scene *models.Scene, fileDeleter *FileDeleter) error { for _, f := range scene.Files { // only delete files where there is no other associated scene otherScenes, err := s.Repository.FindByFileID(ctx, f.ID) @@ -174,12 +173,13 @@ func (s *Service) destroyFiles(ctx context.Context, scene *models.Scene, fileDel continue } + const deleteFile = true if err := file.Destroy(ctx, s.File, f, fileDeleter.Deleter, deleteFile); err != nil { return err } // don't delete files in zip archives - if deleteFile && f.ZipFileID == nil { + if f.ZipFileID == nil { funscriptPath := video.GetFunscriptPath(f.Path) funscriptExists, _ := fsutil.FileExists(funscriptPath) if funscriptExists { diff --git a/pkg/scene/scan.go b/pkg/scene/scan.go index 5d3652ddb..e098c7d85 100644 --- a/pkg/scene/scan.go +++ b/pkg/scene/scan.go @@ -88,6 +88,8 @@ func (h *ScanHandler) Handle(ctx context.Context, f file.File) error { UpdatedAt: now, } + logger.Infof("%s doesn't exist. 
Creating new scene...", f.Base().Path) + if err := h.CreatorUpdater.Create(ctx, newScene, []file.ID{videoFile.ID}); err != nil { return fmt.Errorf("creating new scene: %w", err) } diff --git a/pkg/scraper/stashbox/stash_box.go b/pkg/scraper/stashbox/stash_box.go index e09647c90..f6037f77f 100644 --- a/pkg/scraper/stashbox/stash_box.go +++ b/pkg/scraper/stashbox/stash_box.go @@ -736,151 +736,139 @@ func (c Client) GetUser(ctx context.Context) (*graphql.Me, error) { return c.client.Me(ctx) } -func (c Client) SubmitSceneDraft(ctx context.Context, sceneID int, endpoint string, imagePath string) (*string, error) { +func (c Client) SubmitSceneDraft(ctx context.Context, scene *models.Scene, endpoint string, imagePath string) (*string, error) { draft := graphql.SceneDraftInput{} - var image *os.File - if err := txn.WithTxn(ctx, c.txnManager, func(ctx context.Context) error { - r := c.repository - qb := r.Scene - pqb := r.Performer - sqb := r.Studio + var image io.Reader + r := c.repository + pqb := r.Performer + sqb := r.Studio - scene, err := qb.Find(ctx, sceneID) + if scene.Title != "" { + draft.Title = &scene.Title + } + if scene.Details != "" { + draft.Details = &scene.Details + } + if scene.URL != "" && len(strings.TrimSpace(scene.URL)) > 0 { + url := strings.TrimSpace(scene.URL) + draft.URL = &url + } + if scene.Date != nil { + v := scene.Date.String() + draft.Date = &v + } + + if scene.StudioID != nil { + studio, err := sqb.Find(ctx, int(*scene.StudioID)) if err != nil { - return err + return nil, err + } + studioDraft := graphql.DraftEntityInput{ + Name: studio.Name.String, } - if scene.Title != "" { - draft.Title = &scene.Title - } - if scene.Details != "" { - draft.Details = &scene.Details - } - if scene.URL != "" && len(strings.TrimSpace(scene.URL)) > 0 { - url := strings.TrimSpace(scene.URL) - draft.URL = &url - } - if scene.Date != nil { - v := scene.Date.String() - draft.Date = &v - } - - if scene.StudioID != nil { - studio, err := sqb.Find(ctx, 
int(*scene.StudioID)) - if err != nil { - return err - } - studioDraft := graphql.DraftEntityInput{ - Name: studio.Name.String, - } - - stashIDs, err := sqb.GetStashIDs(ctx, studio.ID) - if err != nil { - return err - } - for _, stashID := range stashIDs { - if stashID.Endpoint == endpoint { - studioDraft.ID = &stashID.StashID - break - } - } - draft.Studio = &studioDraft - } - - fingerprints := []*graphql.FingerprintInput{} - duration := scene.Duration() - if oshash := scene.OSHash(); oshash != "" && duration != 0 { - fingerprint := graphql.FingerprintInput{ - Hash: oshash, - Algorithm: graphql.FingerprintAlgorithmOshash, - Duration: int(duration), - } - fingerprints = append(fingerprints, &fingerprint) - } - - if checksum := scene.Checksum(); checksum != "" && duration != 0 { - fingerprint := graphql.FingerprintInput{ - Hash: checksum, - Algorithm: graphql.FingerprintAlgorithmMd5, - Duration: int(duration), - } - fingerprints = append(fingerprints, &fingerprint) - } - - if phash := scene.Phash(); phash != 0 && duration != 0 { - fingerprint := graphql.FingerprintInput{ - Hash: utils.PhashToString(phash), - Algorithm: graphql.FingerprintAlgorithmPhash, - Duration: int(duration), - } - fingerprints = append(fingerprints, &fingerprint) - } - draft.Fingerprints = fingerprints - - scenePerformers, err := pqb.FindBySceneID(ctx, sceneID) + stashIDs, err := sqb.GetStashIDs(ctx, studio.ID) if err != nil { - return err + return nil, err } - - performers := []*graphql.DraftEntityInput{} - for _, p := range scenePerformers { - performerDraft := graphql.DraftEntityInput{ - Name: p.Name.String, - } - - stashIDs, err := pqb.GetStashIDs(ctx, p.ID) - if err != nil { - return err - } - - for _, stashID := range stashIDs { - if stashID.Endpoint == endpoint { - performerDraft.ID = &stashID.StashID - break - } - } - - performers = append(performers, &performerDraft) - } - draft.Performers = performers - - var tags []*graphql.DraftEntityInput - sceneTags, err := 
r.Tag.FindBySceneID(ctx, scene.ID) - if err != nil { - return err - } - for _, tag := range sceneTags { - tags = append(tags, &graphql.DraftEntityInput{Name: tag.Name}) - } - draft.Tags = tags - - exists, _ := fsutil.FileExists(imagePath) - if exists { - file, err := os.Open(imagePath) - if err == nil { - image = file - } - } - - stashIDs := scene.StashIDs - var stashID *string - for _, v := range stashIDs { - if v.Endpoint == endpoint { - vv := v.StashID - stashID = &vv + for _, stashID := range stashIDs { + if stashID.Endpoint == endpoint { + studioDraft.ID = &stashID.StashID break } } - draft.ID = stashID + draft.Studio = &studioDraft + } - return nil - }); err != nil { + fingerprints := []*graphql.FingerprintInput{} + duration := scene.Duration() + if oshash := scene.OSHash(); oshash != "" && duration != 0 { + fingerprint := graphql.FingerprintInput{ + Hash: oshash, + Algorithm: graphql.FingerprintAlgorithmOshash, + Duration: int(duration), + } + fingerprints = append(fingerprints, &fingerprint) + } + + if checksum := scene.Checksum(); checksum != "" && duration != 0 { + fingerprint := graphql.FingerprintInput{ + Hash: checksum, + Algorithm: graphql.FingerprintAlgorithmMd5, + Duration: int(duration), + } + fingerprints = append(fingerprints, &fingerprint) + } + + if phash := scene.Phash(); phash != 0 && duration != 0 { + fingerprint := graphql.FingerprintInput{ + Hash: utils.PhashToString(phash), + Algorithm: graphql.FingerprintAlgorithmPhash, + Duration: int(duration), + } + fingerprints = append(fingerprints, &fingerprint) + } + draft.Fingerprints = fingerprints + + scenePerformers, err := pqb.FindBySceneID(ctx, scene.ID) + if err != nil { return nil, err } + performers := []*graphql.DraftEntityInput{} + for _, p := range scenePerformers { + performerDraft := graphql.DraftEntityInput{ + Name: p.Name.String, + } + + stashIDs, err := pqb.GetStashIDs(ctx, p.ID) + if err != nil { + return nil, err + } + + for _, stashID := range stashIDs { + if stashID.Endpoint 
== endpoint { + performerDraft.ID = &stashID.StashID + break + } + } + + performers = append(performers, &performerDraft) + } + draft.Performers = performers + + var tags []*graphql.DraftEntityInput + sceneTags, err := r.Tag.FindBySceneID(ctx, scene.ID) + if err != nil { + return nil, err + } + for _, tag := range sceneTags { + tags = append(tags, &graphql.DraftEntityInput{Name: tag.Name}) + } + draft.Tags = tags + + exists, _ := fsutil.FileExists(imagePath) + if exists { + file, err := os.Open(imagePath) + if err == nil { + image = file + } + } + + stashIDs := scene.StashIDs + var stashID *string + for _, v := range stashIDs { + if v.Endpoint == endpoint { + vv := v.StashID + stashID = &vv + break + } + } + draft.ID = stashID + var id *string var ret graphql.SubmitSceneDraft - err := c.submitDraft(ctx, graphql.SubmitSceneDraftDocument, draft, image, &ret) + err = c.submitDraft(ctx, graphql.SubmitSceneDraftDocument, draft, image, &ret) id = ret.SubmitSceneDraft.ID return id, err diff --git a/pkg/sqlite/gallery.go b/pkg/sqlite/gallery.go index 2f0b7a18c..2342d893c 100644 --- a/pkg/sqlite/gallery.go +++ b/pkg/sqlite/gallery.go @@ -388,6 +388,13 @@ func (qb *GalleryStore) FindByFileID(ctx context.Context, fileID file.ID) ([]*mo return ret, nil } +func (qb *GalleryStore) CountByFileID(ctx context.Context, fileID file.ID) (int, error) { + joinTable := galleriesFilesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(fileIDColumn).Eq(fileID)) + return count(ctx, q) +} + func (qb *GalleryStore) FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Gallery, error) { table := qb.queryTable() diff --git a/pkg/sqlite/image.go b/pkg/sqlite/image.go index 4ba5ce118..7e93e3cb5 100644 --- a/pkg/sqlite/image.go +++ b/pkg/sqlite/image.go @@ -379,6 +379,13 @@ func (qb *ImageStore) FindByFileID(ctx context.Context, fileID file.ID) ([]*mode return ret, nil } +func (qb *ImageStore) CountByFileID(ctx context.Context, fileID file.ID) 
(int, error) { + joinTable := imagesFilesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(fileIDColumn).Eq(fileID)) + return count(ctx, q) +} + func (qb *ImageStore) FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Image, error) { table := imagesQueryTable diff --git a/pkg/sqlite/migrations/32_postmigrate.go b/pkg/sqlite/migrations/32_postmigrate.go index fcdeab594..72ef3866c 100644 --- a/pkg/sqlite/migrations/32_postmigrate.go +++ b/pkg/sqlite/migrations/32_postmigrate.go @@ -134,7 +134,6 @@ func (m *schema32Migrator) migrateFiles(ctx context.Context) error { limit = 1000 logEvery = 10000 ) - offset := 0 result := struct { Count int `db:"count"` @@ -146,10 +145,19 @@ func (m *schema32Migrator) migrateFiles(ctx context.Context) error { logger.Infof("Migrating %d files...", result.Count) + lastID := 0 + count := 0 + for { gotSome := false - query := fmt.Sprintf("SELECT `id`, `basename` FROM `files` ORDER BY `id` LIMIT %d OFFSET %d", limit, offset) + // using offset for this is slow. 
Save the last id and filter by that instead + query := "SELECT `id`, `basename` FROM `files` " + if lastID != 0 { + query += fmt.Sprintf("WHERE `id` > %d ", lastID) + } + + query += fmt.Sprintf("ORDER BY `id` LIMIT %d", limit) if err := m.withTxn(ctx, func(tx *sqlx.Tx) error { rows, err := m.db.Query(query) @@ -188,6 +196,9 @@ func (m *schema32Migrator) migrateFiles(ctx context.Context) error { return err } } + + lastID = id + count++ } return rows.Err() @@ -199,10 +210,8 @@ func (m *schema32Migrator) migrateFiles(ctx context.Context) error { break } - offset += limit - - if offset%logEvery == 0 { - logger.Infof("Migrated %d files", offset) + if count%logEvery == 0 { + logger.Infof("Migrated %d files", count) } } diff --git a/pkg/sqlite/scene.go b/pkg/sqlite/scene.go index c2205dcf8..a563b0c25 100644 --- a/pkg/sqlite/scene.go +++ b/pkg/sqlite/scene.go @@ -470,6 +470,13 @@ func (qb *SceneStore) FindByFileID(ctx context.Context, fileID file.ID) ([]*mode return ret, nil } +func (qb *SceneStore) CountByFileID(ctx context.Context, fileID file.ID) (int, error) { + joinTable := scenesFilesJoinTable + + q := dialect.Select(goqu.COUNT("*")).From(joinTable).Where(joinTable.Col(fileIDColumn).Eq(fileID)) + return count(ctx, q) +} + func (qb *SceneStore) FindByFingerprints(ctx context.Context, fp []file.Fingerprint) ([]*models.Scene, error) { table := qb.queryTable() diff --git a/ui/v2.5/src/components/Settings/Tasks/ScanOptions.tsx b/ui/v2.5/src/components/Settings/Tasks/ScanOptions.tsx index 0e01cfaa0..2f9497ee3 100644 --- a/ui/v2.5/src/components/Settings/Tasks/ScanOptions.tsx +++ b/ui/v2.5/src/components/Settings/Tasks/ScanOptions.tsx @@ -12,8 +12,6 @@ export const ScanOptions: React.FC = ({ setOptions: setOptionsState, }) => { const { - useFileMetadata, - stripFileExtension, scanGeneratePreviews, scanGenerateImagePreviews, scanGenerateSprites, @@ -63,18 +61,6 @@ export const ScanOptions: React.FC = ({ headingID="config.tasks.generate_thumbnails_during_scan" onChange={(v) 
=> setOptions({ scanGenerateThumbnails: v })} /> - setOptions({ stripFileExtension: v })} - /> - setOptions({ useFileMetadata: v })} - /> ); }; diff --git a/ui/v2.5/src/docs/en/Changelog/v0170.md b/ui/v2.5/src/docs/en/Changelog/v0170.md index 3caf33590..880d35dbd 100644 --- a/ui/v2.5/src/docs/en/Changelog/v0170.md +++ b/ui/v2.5/src/docs/en/Changelog/v0170.md @@ -10,8 +10,6 @@ Please report all issues to the following Github issue: https://github.com/stash * Import/export functionality is currently disabled. Needs further design. * Missing covers are not currently regenerated. Need to consider further, especially around scene cover redesign. * Deleting galleries is currently slow. -* Don't include file extension as part of the title scan flag is not supported. -* Set name, date, details from embedded file metadata scan flag is not supported. ### ✨ New Features * Added support for identical files. Identical files are assigned to the same scene/gallery/image and can be viewed in File Info. ([#2676](https://github.com/stashapp/stash/pull/2676)) @@ -19,4 +17,6 @@ Please report all issues to the following Github issue: https://github.com/stash * Added release notes dialog. ([#2726](https://github.com/stashapp/stash/pull/2726)) ### 🎨 Improvements +* Object titles are now displayed as the file basename if the title is not explicitly set. The `Don't include file extension as part of the title` scan flag is no longer supported. +* `Set name, date, details from embedded file metadata` scan flag is no longer supported. This functionality may be implemented as a built-in scraper in the future. * Moved Changelogs to Settings page. 
([#2726](https://github.com/stashapp/stash/pull/2726)) \ No newline at end of file diff --git a/ui/v2.5/src/docs/en/ReleaseNotes/v0170.md b/ui/v2.5/src/docs/en/ReleaseNotes/v0170.md index af26e8ade..9b6989dc0 100644 --- a/ui/v2.5/src/docs/en/ReleaseNotes/v0170.md +++ b/ui/v2.5/src/docs/en/ReleaseNotes/v0170.md @@ -10,10 +10,11 @@ Please report all issues to the following Github issue: https://github.com/stash * Import/export functionality is currently disabled. Needs further design. * Missing covers are not currently regenerated. Need to consider further, especially around scene cover redesign. * Deleting galleries is currently slow. -* Don't include file extension as part of the title scan flag is not supported. -* Set name, date, details from embedded file metadata scan flag is not supported. + ### Other changes: * Added support for filtering and sorting by file count. ([#2744](https://github.com/stashapp/stash/pull/2744)) -* Changelog has been moved from the stats page to a section in the Settings page. \ No newline at end of file +* Changelog has been moved from the stats page to a section in the Settings page. +* Object titles are now displayed as the file basename if the title is not explicitly set. The `Don't include file extension as part of the title` scan flag is no longer supported. +* `Set name, date, details from embedded file metadata` scan flag is no longer supported. This functionality may be implemented as a built-in scraper in the future. \ No newline at end of file