Mirror of https://github.com/stashapp/stash.git, synced 2025-12-18 04:44:37 +03:00
Manager refactor, part 1 (#4298)
* Move BackupDatabase and AnonymiseDatabase to internal/manager
* Rename config.Instance to config.Config
* Rename FFMPEG
* Rework manager and initialization process
* Fix Makefile
* Tweak phasher
* Fix config races
* Fix setup error not clearing
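For context, a minimal sketch of the startup pattern implied by this refactor, based only on what the diff below shows: GetInstance no longer initializes lazily through sync.Once and instead panics when the manager has not been set up, so startup code must initialize it explicitly. The manager.Initialize call and its signature below are assumptions for illustration; the real post-refactor entry point is not part of this hunk.

// Hypothetical startup sketch, not taken from the diff.
package main

import (
    "github.com/stashapp/stash/internal/manager"
    "github.com/stashapp/stash/internal/manager/config"
)

func main() {
    // Load the configuration first. config.Initialize appears in the
    // pre-refactor code below; it is assumed to survive in some form.
    cfg, err := config.Initialize()
    if err != nil {
        panic(err)
    }

    // Assumed replacement for the old lazy once.Do path; the actual
    // post-refactor initializer is defined outside this hunk.
    if _, err := manager.Initialize(cfg); err != nil {
        panic(err)
    }

    // After explicit initialization, GetInstance simply returns the
    // singleton; per the new code it panics if initialization was skipped.
    mgr := manager.GetInstance()
    _ = mgr
}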
@@ -4,130 +4,44 @@ import (
    "context"
    "errors"
    "fmt"
    "io"
    "net/http"
    "os"
    "path/filepath"
    "runtime"
    "runtime/pprof"
    "strconv"
    "strings"
    "sync"
    "time"

    "github.com/stashapp/stash/internal/desktop"
    "github.com/stashapp/stash/internal/dlna"
    "github.com/stashapp/stash/internal/log"
    "github.com/stashapp/stash/internal/manager/config"
    "github.com/stashapp/stash/pkg/ffmpeg"
    "github.com/stashapp/stash/pkg/file"
    file_image "github.com/stashapp/stash/pkg/file/image"
    "github.com/stashapp/stash/pkg/file/video"
    "github.com/stashapp/stash/pkg/fsutil"
    "github.com/stashapp/stash/pkg/gallery"
    "github.com/stashapp/stash/pkg/image"
    "github.com/stashapp/stash/pkg/job"
    "github.com/stashapp/stash/pkg/logger"
    "github.com/stashapp/stash/pkg/models"
    "github.com/stashapp/stash/pkg/models/paths"
    "github.com/stashapp/stash/pkg/pkg"
    "github.com/stashapp/stash/pkg/plugin"
    "github.com/stashapp/stash/pkg/scene"
    "github.com/stashapp/stash/pkg/scraper"
    "github.com/stashapp/stash/pkg/session"
    "github.com/stashapp/stash/pkg/sqlite"
    "github.com/stashapp/stash/pkg/utils"
    "github.com/stashapp/stash/ui"

    // register custom migrations
    _ "github.com/stashapp/stash/pkg/sqlite/migrations"
)

type SystemStatus struct {
    DatabaseSchema *int `json:"databaseSchema"`
    DatabasePath *string `json:"databasePath"`
    ConfigPath *string `json:"configPath"`
    AppSchema int `json:"appSchema"`
    Status SystemStatusEnum `json:"status"`
    Os string `json:"os"`
    WorkingDir string `json:"working_dir"`
    HomeDir string `json:"home_dir"`
}

type SystemStatusEnum string

const (
    SystemStatusEnumSetup SystemStatusEnum = "SETUP"
    SystemStatusEnumNeedsMigration SystemStatusEnum = "NEEDS_MIGRATION"
    SystemStatusEnumOk SystemStatusEnum = "OK"
)

var AllSystemStatusEnum = []SystemStatusEnum{
    SystemStatusEnumSetup,
    SystemStatusEnumNeedsMigration,
    SystemStatusEnumOk,
}

func (e SystemStatusEnum) IsValid() bool {
    switch e {
    case SystemStatusEnumSetup, SystemStatusEnumNeedsMigration, SystemStatusEnumOk:
        return true
    }
    return false
}

func (e SystemStatusEnum) String() string {
    return string(e)
}

func (e *SystemStatusEnum) UnmarshalGQL(v interface{}) error {
    str, ok := v.(string)
    if !ok {
        return fmt.Errorf("enums must be strings")
    }

    *e = SystemStatusEnum(str)
    if !e.IsValid() {
        return fmt.Errorf("%s is not a valid SystemStatusEnum", str)
    }
    return nil
}

func (e SystemStatusEnum) MarshalGQL(w io.Writer) {
    fmt.Fprint(w, strconv.Quote(e.String()))
}

type SetupInput struct {
    // Empty to indicate $HOME/.stash/config.yml default
    ConfigLocation string `json:"configLocation"`
    Stashes []*config.StashConfigInput `json:"stashes"`
    // Empty to indicate default
    DatabaseFile string `json:"databaseFile"`
    // Empty to indicate default
    GeneratedLocation string `json:"generatedLocation"`
    // Empty to indicate default
    CacheLocation string `json:"cacheLocation"`

    StoreBlobsInDatabase bool `json:"storeBlobsInDatabase"`
    // Empty to indicate default
    BlobsLocation string `json:"blobsLocation"`
}

type Manager struct {
    Config *config.Instance
    Config *config.Config
    Logger *log.Logger

    Paths *paths.Paths

    FFMPEG *ffmpeg.FFMpeg
    FFMpeg *ffmpeg.FFMpeg
    FFProbe ffmpeg.FFProbe
    StreamManager *ffmpeg.StreamManager

    JobManager *job.Manager
    ReadLockManager *fsutil.ReadLockManager

    SessionStore *session.Store

    JobManager *job.Manager
    DownloadStore *DownloadStore
    SessionStore *session.Store

    PluginCache *plugin.Cache
    ScraperCache *scraper.Cache
@@ -135,8 +49,6 @@ type Manager struct {
    PluginPackageManager *pkg.Manager
    ScraperPackageManager *pkg.Manager

    DownloadStore *DownloadStore

    DLNAService *dlna.Service

    Database *sqlite.Database
@@ -146,378 +58,18 @@ type Manager struct {
    ImageService ImageService
    GalleryService GalleryService

    Scanner *file.Scanner
    Cleaner *file.Cleaner

    scanSubs *subscriptionManager
}

var instance *Manager
var once sync.Once

func GetInstance() *Manager {
    if _, err := Initialize(); err != nil {
        panic(err)
    if instance == nil {
        panic("manager not initialized")
    }
    return instance
}

func Initialize() (*Manager, error) {
    var err error
    once.Do(func() {
        err = initialize()
    })

    return instance, err
}

func initialize() error {
    ctx := context.TODO()
    cfg, err := config.Initialize()

    if err != nil {
        return fmt.Errorf("initializing configuration: %w", err)
    }

    l := initLog()
    initProfiling(cfg.GetCPUProfilePath())

    db := sqlite.NewDatabase()
    repo := db.Repository()

    // start with empty paths
    emptyPaths := paths.Paths{}

    instance = &Manager{
        Config: cfg,
        Logger: l,
        ReadLockManager: fsutil.NewReadLockManager(),
        DownloadStore: NewDownloadStore(),
        PluginCache: plugin.NewCache(cfg),

        Database: db,
        Repository: repo,
        Paths: &emptyPaths,

        scanSubs: &subscriptionManager{},
    }

    instance.SceneService = &scene.Service{
        File: repo.File,
        Repository: repo.Scene,
        MarkerRepository: repo.SceneMarker,
        PluginCache: instance.PluginCache,
        Paths: instance.Paths,
        Config: cfg,
    }

    instance.ImageService = &image.Service{
        File: repo.File,
        Repository: repo.Image,
    }

    instance.GalleryService = &gallery.Service{
        Repository: repo.Gallery,
        ImageFinder: repo.Image,
        ImageService: instance.ImageService,
        File: repo.File,
        Folder: repo.Folder,
    }

    instance.JobManager = initJobManager()

    sceneServer := SceneServer{
        TxnManager: repo.TxnManager,
        SceneCoverGetter: repo.Scene,
    }

    dlnaRepository := dlna.NewRepository(repo)
    instance.DLNAService = dlna.NewService(dlnaRepository, cfg, &sceneServer)

    instance.RefreshPluginSourceManager()
    instance.RefreshScraperSourceManager()

    if !cfg.IsNewSystem() {
        logger.Infof("using config file: %s", cfg.GetConfigFile())

        if err == nil {
            err = cfg.Validate()
        }

        if err != nil {
            return fmt.Errorf("error initializing configuration: %w", err)
        }

        if err := instance.PostInit(ctx); err != nil {
            var migrationNeededErr *sqlite.MigrationNeededError
            if errors.As(err, &migrationNeededErr) {
                logger.Warn(err.Error())
            } else {
                return err
            }
        }

        initSecurity(cfg)
    } else {
        cfgFile := cfg.GetConfigFile()
        if cfgFile != "" {
            cfgFile += " "
        }

        // create temporary session store - this will be re-initialised
        // after config is complete
        instance.SessionStore = session.NewStore(cfg)

        logger.Warnf("config file %snot found. Assuming new system...", cfgFile)
    }

    if err = initFFMPEG(ctx); err != nil {
        logger.Warnf("could not initialize FFMPEG subsystem: %v", err)
    }

    instance.Scanner = makeScanner(repo, instance.PluginCache)
    instance.Cleaner = makeCleaner(repo, instance.PluginCache)

    // if DLNA is enabled, start it now
    if instance.Config.GetDLNADefaultEnabled() {
        if err := instance.DLNAService.Start(nil); err != nil {
            logger.Warnf("could not start DLNA service: %v", err)
        }
    }

    return nil
}

func initialisePackageManager(localPath string, srcPathGetter pkg.SourcePathGetter) *pkg.Manager {
    const timeout = 10 * time.Second
    httpClient := &http.Client{
        Transport: &http.Transport{
            Proxy: http.ProxyFromEnvironment,
        },
        Timeout: timeout,
    }

    return &pkg.Manager{
        Local: &pkg.Store{
            BaseDir: localPath,
            ManifestFile: pkg.ManifestFile,
        },
        PackagePathGetter: srcPathGetter,
        Client: httpClient,
    }
}

func videoFileFilter(ctx context.Context, f models.File) bool {
    return useAsVideo(f.Base().Path)
}

func imageFileFilter(ctx context.Context, f models.File) bool {
    return useAsImage(f.Base().Path)
}

func galleryFileFilter(ctx context.Context, f models.File) bool {
    return isZip(f.Base().Basename)
}

func makeScanner(repo models.Repository, pluginCache *plugin.Cache) *file.Scanner {
    return &file.Scanner{
        Repository: file.NewRepository(repo),
        FileDecorators: []file.Decorator{
            &file.FilteredDecorator{
                Decorator: &video.Decorator{
                    FFProbe: instance.FFProbe,
                },
                Filter: file.FilterFunc(videoFileFilter),
            },
            &file.FilteredDecorator{
                Decorator: &file_image.Decorator{
                    FFProbe: instance.FFProbe,
                },
                Filter: file.FilterFunc(imageFileFilter),
            },
        },
        FingerprintCalculator: &fingerprintCalculator{instance.Config},
        FS: &file.OsFS{},
    }
}

func makeCleaner(repo models.Repository, pluginCache *plugin.Cache) *file.Cleaner {
    return &file.Cleaner{
        FS: &file.OsFS{},
        Repository: file.NewRepository(repo),
        Handlers: []file.CleanHandler{
            &cleanHandler{},
        },
    }
}

func initJobManager() *job.Manager {
    ret := job.NewManager()

    // desktop notifications
    ctx := context.Background()
    c := ret.Subscribe(context.Background())
    go func() {
        for {
            select {
            case j := <-c.RemovedJob:
                if instance.Config.GetNotificationsEnabled() {
                    cleanDesc := strings.TrimRight(j.Description, ".")

                    if j.StartTime == nil {
                        // Task was never started
                        return
                    }

                    timeElapsed := j.EndTime.Sub(*j.StartTime)
                    desktop.SendNotification("Task Finished", "Task \""+cleanDesc+"\" is finished in "+formatDuration(timeElapsed)+".")
                }
            case <-ctx.Done():
                return
            }
        }
    }()

    return ret
}

func formatDuration(t time.Duration) string {
    return fmt.Sprintf("%02.f:%02.f:%02.f", t.Hours(), t.Minutes(), t.Seconds())
}

func initSecurity(cfg *config.Instance) {
    if err := session.CheckExternalAccessTripwire(cfg); err != nil {
        session.LogExternalAccessError(*err)
    }
}

func initProfiling(cpuProfilePath string) {
    if cpuProfilePath == "" {
        return
    }

    f, err := os.Create(cpuProfilePath)
    if err != nil {
        logger.Fatalf("unable to create cpu profile file: %s", err.Error())
    }

    logger.Infof("profiling to %s", cpuProfilePath)

    // StopCPUProfile is defer called in main
    if err = pprof.StartCPUProfile(f); err != nil {
        logger.Warnf("could not start CPU profiling: %v", err)
    }
}

func initFFMPEG(ctx context.Context) error {
    // only do this if we have a config file set
    if instance.Config.GetConfigFile() != "" {
        // use same directory as config path
        configDirectory := instance.Config.GetConfigPath()
        paths := []string{
            configDirectory,
            paths.GetStashHomeDirectory(),
        }
        ffmpegPath, ffprobePath := ffmpeg.GetPaths(paths)

        if ffmpegPath == "" || ffprobePath == "" {
            logger.Infof("couldn't find FFMPEG, attempting to download it")
            if err := ffmpeg.Download(ctx, configDirectory); err != nil {
                msg := `Unable to locate / automatically download FFMPEG

Check the readme for download links.
The FFMPEG and FFProbe binaries should be placed in %s

The error was: %s
`
                logger.Errorf(msg, configDirectory, err)
                return err
            } else {
                // After download get new paths for ffmpeg and ffprobe
                ffmpegPath, ffprobePath = ffmpeg.GetPaths(paths)
            }
        }

        instance.FFMPEG = ffmpeg.NewEncoder(ffmpegPath)
        instance.FFProbe = ffmpeg.FFProbe(ffprobePath)

        instance.FFMPEG.InitHWSupport(ctx)
        instance.RefreshStreamManager()
    }

    return nil
}

func initLog() *log.Logger {
    config := config.GetInstance()
    l := log.NewLogger()
    l.Init(config.GetLogFile(), config.GetLogOut(), config.GetLogLevel())
    logger.Logger = l

    return l
}

// PostInit initialises the paths, caches and txnManager after the initial
// configuration has been set. Should only be called if the configuration
// is valid.
func (s *Manager) PostInit(ctx context.Context) error {
    if err := s.Config.SetInitialConfig(); err != nil {
        logger.Warnf("could not set initial configuration: %v", err)
    }

    *s.Paths = paths.NewPaths(s.Config.GetGeneratedPath(), s.Config.GetBlobsPath())
    s.RefreshConfig()
    s.SessionStore = session.NewStore(s.Config)
    s.PluginCache.RegisterSessionStore(s.SessionStore)

    if err := s.PluginCache.LoadPlugins(); err != nil {
        logger.Errorf("Error reading plugin configs: %s", err.Error())
    }

    s.SetBlobStoreOptions()

    s.ScraperCache = instance.initScraperCache()
    writeStashIcon()

    // clear the downloads and tmp directories
    // #1021 - only clear these directories if the generated folder is non-empty
    if s.Config.GetGeneratedPath() != "" {
        const deleteTimeout = 1 * time.Second

        utils.Timeout(func() {
            if err := fsutil.EmptyDir(instance.Paths.Generated.Downloads); err != nil {
                logger.Warnf("could not empty Downloads directory: %v", err)
            }
            if err := fsutil.EnsureDir(instance.Paths.Generated.Tmp); err != nil {
                logger.Warnf("could not create Tmp directory: %v", err)
            } else {
                if err := fsutil.EmptyDir(instance.Paths.Generated.Tmp); err != nil {
                    logger.Warnf("could not empty Tmp directory: %v", err)
                }
            }
        }, deleteTimeout, func(done chan struct{}) {
            logger.Info("Please wait. Deleting temporary files...") // print
            <-done // and wait for deletion
            logger.Info("Temporary files deleted.")
        })
    }

    database := s.Database
    if err := database.Open(s.Config.GetDatabasePath()); err != nil {
        return err
    }

    // Set the proxy if defined in config
    if s.Config.GetProxy() != "" {
        os.Setenv("HTTP_PROXY", s.Config.GetProxy())
        os.Setenv("HTTPS_PROXY", s.Config.GetProxy())
        os.Setenv("NO_PROXY", s.Config.GetNoProxy())
        logger.Info("Using HTTP Proxy")
    }

    return nil
}

func (s *Manager) SetBlobStoreOptions() {
    storageType := s.Config.GetBlobsStorage()
    blobsPath := s.Config.GetBlobsPath()
@@ -529,59 +81,45 @@ func (s *Manager) SetBlobStoreOptions() {
    })
}

func writeStashIcon() {
    iconPath := filepath.Join(instance.Config.GetConfigPath(), "icon.png")
    err := os.WriteFile(iconPath, ui.FaviconProvider.GetFaviconPng(), 0644)
    if err != nil {
        logger.Errorf("Couldn't write icon file: %s", err.Error())
    }
}

// initScraperCache initializes a new scraper cache and returns it.
func (s *Manager) initScraperCache() *scraper.Cache {
    scraperRepository := scraper.NewRepository(s.Repository)
    ret, err := scraper.NewCache(s.Config, scraperRepository)

    if err != nil {
        logger.Errorf("Error reading scraper configs: %s", err.Error())
    }

    return ret
}

func (s *Manager) RefreshConfig() {
    *s.Paths = paths.NewPaths(s.Config.GetGeneratedPath(), s.Config.GetBlobsPath())
    config := s.Config
    if config.Validate() == nil {
    cfg := s.Config
    *s.Paths = paths.NewPaths(cfg.GetGeneratedPath(), cfg.GetBlobsPath())
    if cfg.Validate() == nil {
        if err := fsutil.EnsureDir(s.Paths.Generated.Screenshots); err != nil {
            logger.Warnf("could not create directory for Screenshots: %v", err)
            logger.Warnf("could not create screenshots directory: %v", err)
        }
        if err := fsutil.EnsureDir(s.Paths.Generated.Vtt); err != nil {
            logger.Warnf("could not create directory for VTT: %v", err)
            logger.Warnf("could not create VTT directory: %v", err)
        }
        if err := fsutil.EnsureDir(s.Paths.Generated.Markers); err != nil {
            logger.Warnf("could not create directory for Markers: %v", err)
            logger.Warnf("could not create markers directory: %v", err)
        }
        if err := fsutil.EnsureDir(s.Paths.Generated.Transcodes); err != nil {
            logger.Warnf("could not create directory for Transcodes: %v", err)
            logger.Warnf("could not create transcodes directory: %v", err)
        }
        if err := fsutil.EnsureDir(s.Paths.Generated.Downloads); err != nil {
            logger.Warnf("could not create directory for Downloads: %v", err)
            logger.Warnf("could not create downloads directory: %v", err)
        }
        if err := fsutil.EnsureDir(s.Paths.Generated.InteractiveHeatmap); err != nil {
            logger.Warnf("could not create directory for Interactive Heatmaps: %v", err)
            logger.Warnf("could not create interactive heatmaps directory: %v", err)
        }
    }
}

// RefreshScraperCache refreshes the scraper cache. Call this when scraper
// configuration changes.
func (s *Manager) RefreshScraperCache() {
    s.ScraperCache = s.initScraperCache()
// RefreshPluginCache refreshes the plugin cache.
// Call this when the plugin configuration changes.
func (s *Manager) RefreshPluginCache() {
    s.PluginCache.ReloadPlugins()
}

// RefreshStreamManager refreshes the stream manager. Call this when cache directory
// changes.
// RefreshScraperCache refreshes the scraper cache.
// Call this when the scraper configuration changes.
func (s *Manager) RefreshScraperCache() {
    s.ScraperCache.ReloadScrapers()
}

// RefreshStreamManager refreshes the stream manager.
// Call this when the cache directory changes.
func (s *Manager) RefreshStreamManager() {
    // shutdown existing manager if needed
    if s.StreamManager != nil {
@@ -589,8 +127,22 @@ func (s *Manager) RefreshStreamManager() {
        s.StreamManager = nil
    }

    cacheDir := s.Config.GetCachePath()
    s.StreamManager = ffmpeg.NewStreamManager(cacheDir, s.FFMPEG, s.FFProbe, s.Config, s.ReadLockManager)
    cfg := s.Config
    cacheDir := cfg.GetCachePath()
    s.StreamManager = ffmpeg.NewStreamManager(cacheDir, s.FFMpeg, s.FFProbe, cfg, s.ReadLockManager)
}

// RefreshDLNA starts/stops the DLNA service as needed.
func (s *Manager) RefreshDLNA() {
    dlnaService := s.DLNAService
    enabled := s.Config.GetDLNADefaultEnabled()
    if !enabled && dlnaService.IsRunning() {
        dlnaService.Stop(nil)
    } else if enabled && !dlnaService.IsRunning() {
        if err := dlnaService.Start(nil); err != nil {
            logger.Warnf("error starting DLNA service: %v", err)
        }
    }
}

func (s *Manager) RefreshScraperSourceManager() {
@@ -625,7 +177,11 @@ func setSetupDefaults(input *SetupInput) {

func (s *Manager) Setup(ctx context.Context, input SetupInput) error {
    setSetupDefaults(&input)
    c := s.Config
    cfg := s.Config

    if err := cfg.SetInitialConfig(); err != nil {
        return fmt.Errorf("error setting initial configuration: %v", err)
    }

    // create the config directory if it does not exist
    // don't do anything if config is already set in the environment
@@ -652,7 +208,7 @@ func (s *Manager) Setup(ctx context.Context, input SetupInput) error {
    }

    // create the generated directory if it does not exist
    if !c.HasOverride(config.Generated) {
    if !cfg.HasOverride(config.Generated) {
        if exists, _ := fsutil.DirExists(input.GeneratedLocation); !exists {
            if err := os.MkdirAll(input.GeneratedLocation, 0755); err != nil {
                return fmt.Errorf("error creating generated directory: %v", err)
@@ -663,75 +219,60 @@ func (s *Manager) Setup(ctx context.Context, input SetupInput) error {
    }

    // create the cache directory if it does not exist
    if !c.HasOverride(config.Cache) {
    if !cfg.HasOverride(config.Cache) {
        if exists, _ := fsutil.DirExists(input.CacheLocation); !exists {
            if err := os.MkdirAll(input.CacheLocation, 0755); err != nil {
                return fmt.Errorf("error creating cache directory: %v", err)
            }
        }

        s.Config.Set(config.Cache, input.CacheLocation)
        cfg.Set(config.Cache, input.CacheLocation)
    }

    if input.StoreBlobsInDatabase {
        s.Config.Set(config.BlobsStorage, config.BlobStorageTypeDatabase)
        cfg.Set(config.BlobsStorage, config.BlobStorageTypeDatabase)
    } else {
        if !c.HasOverride(config.BlobsPath) {
        if !cfg.HasOverride(config.BlobsPath) {
            if exists, _ := fsutil.DirExists(input.BlobsLocation); !exists {
                if err := os.MkdirAll(input.BlobsLocation, 0755); err != nil {
                    return fmt.Errorf("error creating blobs directory: %v", err)
                }
            }

            s.Config.Set(config.BlobsPath, input.BlobsLocation)
            cfg.Set(config.BlobsPath, input.BlobsLocation)
        }

        s.Config.Set(config.BlobsStorage, config.BlobStorageTypeFilesystem)
        cfg.Set(config.BlobsStorage, config.BlobStorageTypeFilesystem)
    }

    // set the configuration
    if !c.HasOverride(config.Database) {
        s.Config.Set(config.Database, input.DatabaseFile)
    if !cfg.HasOverride(config.Database) {
        cfg.Set(config.Database, input.DatabaseFile)
    }

    s.Config.Set(config.Stash, input.Stashes)
    if err := s.Config.Write(); err != nil {
    cfg.Set(config.Stash, input.Stashes)

    if err := cfg.Write(); err != nil {
        return fmt.Errorf("error writing configuration file: %v", err)
    }

    // initialise the database
    if err := s.PostInit(ctx); err != nil {
        var migrationNeededErr *sqlite.MigrationNeededError
        if errors.As(err, &migrationNeededErr) {
            logger.Warn(err.Error())
        } else {
            return fmt.Errorf("error initializing the database: %v", err)
        }
    // finish initialization
    if err := s.postInit(ctx); err != nil {
        return fmt.Errorf("error completing initialization: %v", err)
    }

    s.Config.FinalizeSetup()

    if err := initFFMPEG(ctx); err != nil {
        return fmt.Errorf("error initializing FFMPEG subsystem: %v", err)
    }

    instance.Scanner = makeScanner(instance.Repository, instance.PluginCache)
    cfg.FinalizeSetup()

    return nil
}

func (s *Manager) validateFFMPEG() error {
    if s.FFMPEG == nil || s.FFProbe == "" {
func (s *Manager) validateFFmpeg() error {
    if s.FFMpeg == nil || s.FFProbe == "" {
        return errors.New("missing ffmpeg and/or ffprobe")
    }

    return nil
}

type MigrateInput struct {
    BackupPath string `json:"backupPath"`
}

func (s *Manager) Migrate(ctx context.Context, input MigrateInput) error {
    database := s.Database

@@ -778,6 +319,76 @@ func (s *Manager) Migrate(ctx context.Context, input MigrateInput) error {
    return nil
}

func (s *Manager) BackupDatabase(download bool) (string, string, error) {
    var backupPath string
    var backupName string
    if download {
        backupDir := s.Paths.Generated.Downloads
        if err := fsutil.EnsureDir(backupDir); err != nil {
            return "", "", fmt.Errorf("could not create backup directory %v: %w", backupDir, err)
        }
        f, err := os.CreateTemp(backupDir, "backup*.sqlite")
        if err != nil {
            return "", "", err
        }

        backupPath = f.Name()
        backupName = s.Database.DatabaseBackupPath("")
        f.Close()
    } else {
        backupDir := s.Config.GetBackupDirectoryPathOrDefault()
        if backupDir != "" {
            if err := fsutil.EnsureDir(backupDir); err != nil {
                return "", "", fmt.Errorf("could not create backup directory %v: %w", backupDir, err)
            }
        }
        backupPath = s.Database.DatabaseBackupPath(backupDir)
        backupName = filepath.Base(backupPath)
    }

    err := s.Database.Backup(backupPath)
    if err != nil {
        return "", "", err
    }

    return backupPath, backupName, nil
}

func (s *Manager) AnonymiseDatabase(download bool) (string, string, error) {
    var outPath string
    var outName string
    if download {
        outDir := s.Paths.Generated.Downloads
        if err := fsutil.EnsureDir(outDir); err != nil {
            return "", "", fmt.Errorf("could not create output directory %v: %w", outDir, err)
        }
        f, err := os.CreateTemp(outDir, "anonymous*.sqlite")
        if err != nil {
            return "", "", err
        }

        outPath = f.Name()
        outName = s.Database.AnonymousDatabasePath("")
        f.Close()
    } else {
        outDir := s.Config.GetBackupDirectoryPathOrDefault()
        if outDir != "" {
            if err := fsutil.EnsureDir(outDir); err != nil {
                return "", "", fmt.Errorf("could not create output directory %v: %w", outDir, err)
            }
        }
        outPath = s.Database.AnonymousDatabasePath(outDir)
        outName = filepath.Base(outPath)
    }

    err := s.Database.Anonymise(outPath)
    if err != nil {
        return "", "", err
    }

    return outPath, outName, nil
}

func (s *Manager) GetSystemStatus() *SystemStatus {
    workingDir := fsutil.GetWorkingDirectory()
    homeDir := fsutil.GetHomeDirectory()
@@ -809,24 +420,16 @@ func (s *Manager) GetSystemStatus() *SystemStatus {
}

// Shutdown gracefully stops the manager
func (s *Manager) Shutdown(code int) {
    // stop any profiling at exit
    pprof.StopCPUProfile()
func (s *Manager) Shutdown() {
    // TODO: Each part of the manager needs to gracefully stop at some point

    if s.StreamManager != nil {
        s.StreamManager.Shutdown()
        s.StreamManager = nil
    }

    // TODO: Each part of the manager needs to gracefully stop at some point
    // for now, we just close the database.
    err := s.Database.Close()
    if err != nil {
        logger.Errorf("Error closing database: %s", err)
        if code == 0 {
            os.Exit(1)
        }
    }

    os.Exit(code)
}