Images section (#813)

* Add new configuration options
* Refactor scan/clean
* Schema changes
* Add details to galleries
* Remove redundant code
* Refine thumbnail generation
* Gallery overhaul
* Don't allow modifying zip gallery images
* Show gallery card overlays
* Hide zoom slider when not in grid mode
This commit is contained in:
WithoutPants
2020-10-13 10:12:46 +11:00
committed by GitHub
parent df3252e24f
commit aca2c7c5f4
147 changed files with 12483 additions and 946 deletions

View File

@@ -11,6 +11,8 @@ import (
"sync"
"time"
"github.com/stashapp/stash/pkg/gallery"
"github.com/stashapp/stash/pkg/image"
"github.com/stashapp/stash/pkg/logger"
"github.com/stashapp/stash/pkg/manager/config"
"github.com/stashapp/stash/pkg/manager/jsonschema"
@@ -34,6 +36,7 @@ type ExportTask struct {
fileNamingAlgorithm models.HashAlgorithm
scenes *exportSpec
images *exportSpec
performers *exportSpec
movies *exportSpec
tags *exportSpec
@@ -75,6 +78,7 @@ func CreateExportTask(a models.HashAlgorithm, input models.ExportObjectsInput) *
return &ExportTask{
fileNamingAlgorithm: a,
scenes: newExportSpec(input.Scenes),
images: newExportSpec(input.Images),
performers: newExportSpec(input.Performers),
movies: newExportSpec(input.Movies),
tags: newExportSpec(input.Tags),
@@ -122,7 +126,8 @@ func (t *ExportTask) Start(wg *sync.WaitGroup) {
paths.EnsureJSONDirs(t.baseDir)
t.ExportScenes(workerCount)
t.ExportGalleries()
t.ExportImages(workerCount)
t.ExportGalleries(workerCount)
t.ExportPerformers(workerCount)
t.ExportStudios(workerCount)
t.ExportMovies(workerCount)
@@ -183,6 +188,7 @@ func (t *ExportTask) zipFiles(w io.Writer) error {
filepath.Walk(t.json.json.Studios, t.zipWalkFunc(u.json.Studios, z))
filepath.Walk(t.json.json.Movies, t.zipWalkFunc(u.json.Movies, z))
filepath.Walk(t.json.json.Scenes, t.zipWalkFunc(u.json.Scenes, z))
filepath.Walk(t.json.json.Images, t.zipWalkFunc(u.json.Images, z))
return nil
}
@@ -257,7 +263,7 @@ func (t *ExportTask) ExportScenes(workers int) {
if (i % 100) == 0 { // make progress easier to read
logger.Progressf("[scenes] %d of %d", index, len(scenes))
}
t.Mappings.Scenes = append(t.Mappings.Scenes, jsonschema.PathMapping{Path: scene.Path, Checksum: scene.GetHash(t.fileNamingAlgorithm)})
t.Mappings.Scenes = append(t.Mappings.Scenes, jsonschema.PathNameMapping{Path: scene.Path, Checksum: scene.GetHash(t.fileNamingAlgorithm)})
jobCh <- scene // feed workers
}
@@ -366,7 +372,127 @@ func exportScene(wg *sync.WaitGroup, jobChan <-chan *models.Scene, t *ExportTask
}
}
func (t *ExportTask) ExportGalleries() {
// ExportImages writes the JSON representation of the selected images to the
// export directory using the given number of worker goroutines. The set of
// images is either all images (full export / images.all) or the explicit ID
// list in t.images; each exported image is also recorded in t.Mappings.Images.
func (t *ExportTask) ExportImages(workers int) {
	var imagesWg sync.WaitGroup

	imageReader := models.NewImageReaderWriter(nil)

	var images []*models.Image
	var err error
	all := t.full || (t.images != nil && t.images.all)
	if all {
		images, err = imageReader.All()
	} else if t.images != nil && len(t.images.IDs) > 0 {
		images, err = imageReader.FindMany(t.images.IDs)
	}

	if err != nil {
		logger.Errorf("[images] failed to fetch images: %s", err.Error())
		// nothing can be exported if the fetch failed; return rather than
		// starting workers over a nil slice and logging "export complete"
		return
	}

	jobCh := make(chan *models.Image, workers*2) // buffered channel to feed workers

	logger.Info("[images] exporting")
	startTime := time.Now()

	for w := 0; w < workers; w++ { // create export Image workers
		imagesWg.Add(1)
		go exportImage(&imagesWg, jobCh, t)
	}

	for i, image := range images {
		index := i + 1

		if (i % 100) == 0 { // make progress easier to read
			logger.Progressf("[images] %d of %d", index, len(images))
		}

		t.Mappings.Images = append(t.Mappings.Images, jsonschema.PathNameMapping{Path: image.Path, Checksum: image.Checksum})
		jobCh <- image // feed workers
	}

	close(jobCh) // close channel so that workers will know no more jobs are available
	imagesWg.Wait()

	logger.Infof("[images] export complete in %s. %d workers used.", time.Since(startTime), workers)
}
// exportImage is a worker that consumes images from jobChan and writes each
// one's JSON representation — including its studio name, gallery checksums,
// performer names and tag names — via the export task's JSON writer. Any
// per-image error is logged and the image is skipped; the worker keeps going.
func exportImage(wg *sync.WaitGroup, jobChan <-chan *models.Image, t *ExportTask) {
	defer wg.Done()

	studioReader := models.NewStudioReaderWriter(nil)
	galleryReader := models.NewGalleryReaderWriter(nil)
	performerReader := models.NewPerformerReaderWriter(nil)
	tagReader := models.NewTagReaderWriter(nil)

	for img := range jobChan {
		hash := img.Checksum

		newImageJSON := image.ToBasicJSON(img)

		studioName, err := image.GetStudioName(studioReader, img)
		if err != nil {
			logger.Errorf("[images] <%s> error getting image studio name: %s", hash, err.Error())
			continue
		}
		newImageJSON.Studio = studioName

		imageGalleries, err := galleryReader.FindByImageID(img.ID)
		if err != nil {
			logger.Errorf("[images] <%s> error getting image galleries: %s", hash, err.Error())
			continue
		}
		newImageJSON.Galleries = t.getGalleryChecksums(imageGalleries)

		performers, err := performerReader.FindByImageID(img.ID)
		if err != nil {
			logger.Errorf("[images] <%s> error getting image performer names: %s", hash, err.Error())
			continue
		}
		newImageJSON.Performers = performer.GetNames(performers)

		tags, err := tagReader.FindByImageID(img.ID)
		if err != nil {
			logger.Errorf("[images] <%s> error getting image tag names: %s", hash, err.Error())
			continue
		}
		newImageJSON.Tags = tag.GetNames(tags)

		if t.includeDependencies {
			if img.StudioID.Valid {
				t.studios.IDs = utils.IntAppendUnique(t.studios.IDs, int(img.StudioID.Int64))
			}
			// NOTE(review): gallery IDs are deliberately not added to the
			// dependency set here — confirm whether galleries should be
			// pulled in when exporting images with dependencies.
			t.tags.IDs = utils.IntAppendUniques(t.tags.IDs, tag.GetIDs(tags))
			t.performers.IDs = utils.IntAppendUniques(t.performers.IDs, performer.GetIDs(performers))
		}

		// skip the write when the on-disk JSON already matches
		existingJSON, err := t.json.getImage(hash)
		if err == nil && jsonschema.CompareJSON(*existingJSON, *newImageJSON) {
			continue
		}

		if err := t.json.saveImage(hash, newImageJSON); err != nil {
			logger.Errorf("[images] <%s> failed to save json: %s", hash, err.Error())
		}
	}
}
// getGalleryChecksums returns the checksums of the given galleries, in order.
// It returns nil (rather than an empty slice) when galleries is empty, so the
// zero case is indistinguishable from the original append-to-nil behavior.
func (t *ExportTask) getGalleryChecksums(galleries []*models.Gallery) []string {
	if len(galleries) == 0 {
		return nil
	}

	// pre-size to avoid repeated growth copies
	ret := make([]string, 0, len(galleries))
	for _, g := range galleries {
		ret = append(ret, g.Checksum)
	}
	return ret
}
func (t *ExportTask) ExportGalleries(workers int) {
var galleriesWg sync.WaitGroup
reader := models.NewGalleryReaderWriter(nil)
var galleries []*models.Gallery
@@ -382,15 +508,92 @@ func (t *ExportTask) ExportGalleries() {
logger.Errorf("[galleries] failed to fetch galleries: %s", err.Error())
}
jobCh := make(chan *models.Gallery, workers*2) // make a buffered channel to feed workers
logger.Info("[galleries] exporting")
startTime := time.Now()
for w := 0; w < workers; w++ { // create export Scene workers
galleriesWg.Add(1)
go exportGallery(&galleriesWg, jobCh, t)
}
for i, gallery := range galleries {
index := i + 1
logger.Progressf("[galleries] %d of %d", index, len(galleries))
t.Mappings.Galleries = append(t.Mappings.Galleries, jsonschema.PathMapping{Path: gallery.Path, Checksum: gallery.Checksum})
if (i % 100) == 0 { // make progress easier to read
logger.Progressf("[galleries] %d of %d", index, len(galleries))
}
t.Mappings.Galleries = append(t.Mappings.Galleries, jsonschema.PathNameMapping{
Path: gallery.Path.String,
Name: gallery.Title.String,
Checksum: gallery.Checksum,
})
jobCh <- gallery
}
logger.Infof("[galleries] export complete")
close(jobCh) // close channel so that workers will know no more jobs are available
galleriesWg.Wait()
logger.Infof("[galleries] export complete in %s. %d workers used.", time.Since(startTime), workers)
}
// exportGallery is a worker that consumes galleries from jobChan and writes
// each one's JSON representation — including its studio name, performer names
// and tag names — via the export task's JSON writer. Per-gallery errors are
// logged and that gallery is skipped; the worker continues with the next job.
func exportGallery(wg *sync.WaitGroup, jobChan <-chan *models.Gallery, t *ExportTask) {
	defer wg.Done()

	studioReader := models.NewStudioReaderWriter(nil)
	performerReader := models.NewPerformerReaderWriter(nil)
	tagReader := models.NewTagReaderWriter(nil)

	for gal := range jobChan {
		hash := gal.Checksum

		newGalleryJSON, err := gallery.ToBasicJSON(gal)
		if err != nil {
			logger.Errorf("[galleries] <%s> error getting gallery JSON: %s", hash, err.Error())
			continue
		}

		studioName, err := gallery.GetStudioName(studioReader, gal)
		if err != nil {
			logger.Errorf("[galleries] <%s> error getting gallery studio name: %s", hash, err.Error())
			continue
		}
		newGalleryJSON.Studio = studioName

		performers, err := performerReader.FindByGalleryID(gal.ID)
		if err != nil {
			logger.Errorf("[galleries] <%s> error getting gallery performer names: %s", hash, err.Error())
			continue
		}
		newGalleryJSON.Performers = performer.GetNames(performers)

		tags, err := tagReader.FindByGalleryID(gal.ID)
		if err != nil {
			logger.Errorf("[galleries] <%s> error getting gallery tag names: %s", hash, err.Error())
			continue
		}
		newGalleryJSON.Tags = tag.GetNames(tags)

		if t.includeDependencies {
			if gal.StudioID.Valid {
				t.studios.IDs = utils.IntAppendUnique(t.studios.IDs, int(gal.StudioID.Int64))
			}
			t.tags.IDs = utils.IntAppendUniques(t.tags.IDs, tag.GetIDs(tags))
			t.performers.IDs = utils.IntAppendUniques(t.performers.IDs, performer.GetIDs(performers))
		}

		// skip the write when the on-disk JSON already matches
		existingJSON, err := t.json.getGallery(hash)
		if err == nil && jsonschema.CompareJSON(*existingJSON, *newGalleryJSON) {
			continue
		}

		if err := t.json.saveGallery(hash, newGalleryJSON); err != nil {
			logger.Errorf("[galleries] <%s> failed to save json: %s", hash, err.Error())
		}
	}
}
func (t *ExportTask) ExportPerformers(workers int) {
@@ -423,7 +626,7 @@ func (t *ExportTask) ExportPerformers(workers int) {
index := i + 1
logger.Progressf("[performers] %d of %d", index, len(performers))
t.Mappings.Performers = append(t.Mappings.Performers, jsonschema.NameMapping{Name: performer.Name.String, Checksum: performer.Checksum})
t.Mappings.Performers = append(t.Mappings.Performers, jsonschema.PathNameMapping{Name: performer.Name.String, Checksum: performer.Checksum})
jobCh <- performer // feed workers
}
@@ -490,7 +693,7 @@ func (t *ExportTask) ExportStudios(workers int) {
index := i + 1
logger.Progressf("[studios] %d of %d", index, len(studios))
t.Mappings.Studios = append(t.Mappings.Studios, jsonschema.NameMapping{Name: studio.Name.String, Checksum: studio.Checksum})
t.Mappings.Studios = append(t.Mappings.Studios, jsonschema.PathNameMapping{Name: studio.Name.String, Checksum: studio.Checksum})
jobCh <- studio // feed workers
}
@@ -558,7 +761,7 @@ func (t *ExportTask) ExportTags(workers int) {
// generate checksum on the fly by name, since we don't store it
checksum := utils.MD5FromString(tag.Name)
t.Mappings.Tags = append(t.Mappings.Tags, jsonschema.NameMapping{Name: tag.Name, Checksum: checksum})
t.Mappings.Tags = append(t.Mappings.Tags, jsonschema.PathNameMapping{Name: tag.Name, Checksum: checksum})
jobCh <- tag // feed workers
}
@@ -626,7 +829,7 @@ func (t *ExportTask) ExportMovies(workers int) {
index := i + 1
logger.Progressf("[movies] %d of %d", index, len(movies))
t.Mappings.Movies = append(t.Mappings.Movies, jsonschema.NameMapping{Name: movie.Name.String, Checksum: movie.Checksum})
t.Mappings.Movies = append(t.Mappings.Movies, jsonschema.PathNameMapping{Name: movie.Name.String, Checksum: movie.Checksum})
jobCh <- movie // feed workers
}