[Files Refactor] Performance tuning (#2819)

* Load scene relationships on demand
* Load image relationships on demand
* Load gallery relationships on demand
* Add dataloaden
* Use dataloaders
* Use where in for other find many functions
This commit is contained in:
WithoutPants
2022-08-12 12:21:46 +10:00
parent 9b31b20fed
commit 00608c167a
317 changed files with 28002 additions and 14875 deletions

2
vendor/github.com/vektah/dataloaden/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,2 @@
/vendor
/.idea

97
vendor/github.com/vektah/dataloaden/README.md generated vendored Normal file
View File

@@ -0,0 +1,97 @@
### The DATALOADer gENerator [![CircleCI](https://circleci.com/gh/Vektah/dataloaden.svg?style=svg)](https://circleci.com/gh/vektah/dataloaden) [![Go Report Card](https://goreportcard.com/badge/github.com/vektah/dataloaden)](https://goreportcard.com/report/github.com/vektah/dataloaden) [![codecov](https://codecov.io/gh/vektah/dataloaden/branch/master/graph/badge.svg)](https://codecov.io/gh/vektah/dataloaden)
Requires Go 1.11+ for module support.
This is a tool for generating type safe data loaders for go, inspired by https://github.com/facebook/dataloader.
The intended use is in graphql servers, to reduce the number of queries being sent to the database. These dataloader
objects should be request-scoped and short-lived. They should be cheap to create in every request even if they don't
get used.
#### Getting started
From inside the package you want to have the dataloader in:
```bash
go run github.com/vektah/dataloaden UserLoader string *github.com/dataloaden/example.User
```
This will generate a dataloader called `UserLoader` that looks up `*github.com/dataloaden/example.User` objects
based on a `string` key.
In another file in the same package, create the constructor method:
```go
func NewUserLoader() *UserLoader {
return &UserLoader{
wait: 2 * time.Millisecond,
maxBatch: 100,
fetch: func(keys []string) ([]*User, []error) {
users := make([]*User, len(keys))
errors := make([]error, len(keys))
for i, key := range keys {
users[i] = &User{ID: key, Name: "user " + key}
}
return users, errors
},
}
}
```
Then wherever you want to call the dataloader
```go
loader := NewUserLoader()
user, err := loader.Load("123")
```
This method will block for a short amount of time, waiting for any other similar requests to come in, then call your fetch
function once. It also caches values and won't request duplicates in a batch.
#### Returning Slices
You may want to generate a dataloader that returns slices instead of single values. Both key and value types can be a
simple go type expression:
```bash
go run github.com/vektah/dataloaden UserSliceLoader string []*github.com/dataloaden/example.User
```
Now each key is expected to return a slice of values and the `fetch` function has the return type `[][]*User`.
#### Using with go modules
Create a tools.go that looks like this:
```go
// +build tools
package main
import _ "github.com/vektah/dataloaden"
```
This will allow go modules to see the dependency.
You can invoke it from anywhere within your module now using `go run github.com/vektah/dataloaden` and
always get the pinned version.
#### Wait, how do I use context with this?
I don't think context makes sense to be passed through a data loader. Consider a few scenarios:
1. a dataloader shared between requests: request A and B both get batched together, which context should be passed to the DB? context.Background is probably more suitable.
2. a dataloader per request for graphql: two different nodes in the graph get batched together, they have different context for tracing purposes, which should be passed to the db? neither, you should just use the root request context.
So be explicit about your context:
```go
func NewLoader(ctx context.Context) *UserLoader {
return &UserLoader{
wait: 2 * time.Millisecond,
maxBatch: 100,
fetch: func(keys []string) ([]*User, []error) {
// you now have a ctx to work with
},
}
}
```
If you feel like I'm wrong please raise an issue.

32
vendor/github.com/vektah/dataloaden/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,32 @@
version: "{build}"
# Source Config
skip_branch_with_pr: true
clone_folder: c:\projects\dataloaden
# Build host
environment:
GOPATH: c:\gopath
GOVERSION: 1.11.5
PATH: '%PATH%;c:\gopath\bin'
init:
- git config --global core.autocrlf input
# Build
install:
# Install the specific Go version.
- rmdir c:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
- msiexec /i go%GOVERSION%.windows-amd64.msi /q
- go version
build: false
deploy: false
test_script:
- go generate ./...
- go test -parallel 8 ./...

28
vendor/github.com/vektah/dataloaden/dataloaden.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
package main
import (
"fmt"
"os"
"github.com/vektah/dataloaden/pkg/generator"
)
func main() {
if len(os.Args) != 4 {
fmt.Println("usage: name keyType valueType")
fmt.Println(" example:")
fmt.Println(" dataloaden 'UserLoader int []*github.com/my/package.User'")
os.Exit(1)
}
wd, err := os.Getwd()
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(2)
}
if err := generator.Generate(os.Args[1], os.Args[2], os.Args[3], wd); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(2)
}
}

7
vendor/github.com/vektah/dataloaden/licence.md generated vendored Normal file
View File

@@ -0,0 +1,7 @@
Copyright (c) 2017 Adam Scarr
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -0,0 +1,163 @@
package generator
import (
"bytes"
"fmt"
"io/ioutil"
"path/filepath"
"regexp"
"strings"
"unicode"
"github.com/pkg/errors"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/imports"
)
type templateData struct {
Package string
Name string
KeyType *goType
ValType *goType
}
type goType struct {
Modifiers string
ImportPath string
ImportName string
Name string
}
func (t *goType) String() string {
if t.ImportName != "" {
return t.Modifiers + t.ImportName + "." + t.Name
}
return t.Modifiers + t.Name
}
func (t *goType) IsPtr() bool {
return strings.HasPrefix(t.Modifiers, "*")
}
func (t *goType) IsSlice() bool {
return strings.HasPrefix(t.Modifiers, "[]")
}
var partsRe = regexp.MustCompile(`^([\[\]\*]*)(.*?)(\.\w*)?$`)
func parseType(str string) (*goType, error) {
parts := partsRe.FindStringSubmatch(str)
if len(parts) != 4 {
return nil, fmt.Errorf("type must be in the form []*github.com/import/path.Name")
}
t := &goType{
Modifiers: parts[1],
ImportPath: parts[2],
Name: strings.TrimPrefix(parts[3], "."),
}
if t.Name == "" {
t.Name = t.ImportPath
t.ImportPath = ""
}
if t.ImportPath != "" {
p, err := packages.Load(&packages.Config{Mode: packages.NeedName}, t.ImportPath)
if err != nil {
return nil, err
}
if len(p) != 1 {
return nil, fmt.Errorf("not found")
}
t.ImportName = p[0].Name
}
return t, nil
}
func Generate(name string, keyType string, valueType string, wd string) error {
data, err := getData(name, keyType, valueType, wd)
if err != nil {
return err
}
filename := strings.ToLower(data.Name) + "_gen.go"
if err := writeTemplate(filepath.Join(wd, filename), data); err != nil {
return err
}
return nil
}
func getData(name string, keyType string, valueType string, wd string) (templateData, error) {
var data templateData
genPkg := getPackage(wd)
if genPkg == nil {
return templateData{}, fmt.Errorf("unable to find package info for " + wd)
}
var err error
data.Name = name
data.Package = genPkg.Name
data.KeyType, err = parseType(keyType)
if err != nil {
return templateData{}, fmt.Errorf("key type: %s", err.Error())
}
data.ValType, err = parseType(valueType)
if err != nil {
return templateData{}, fmt.Errorf("key type: %s", err.Error())
}
// if we are inside the same package as the type we don't need an import and can refer directly to the type
if genPkg.PkgPath == data.ValType.ImportPath {
data.ValType.ImportName = ""
data.ValType.ImportPath = ""
}
if genPkg.PkgPath == data.KeyType.ImportPath {
data.KeyType.ImportName = ""
data.KeyType.ImportPath = ""
}
return data, nil
}
func getPackage(dir string) *packages.Package {
p, _ := packages.Load(&packages.Config{
Dir: dir,
}, ".")
if len(p) != 1 {
return nil
}
return p[0]
}
func writeTemplate(filepath string, data templateData) error {
var buf bytes.Buffer
if err := tpl.Execute(&buf, data); err != nil {
return errors.Wrap(err, "generating code")
}
src, err := imports.Process(filepath, buf.Bytes(), nil)
if err != nil {
return errors.Wrap(err, "unable to gofmt")
}
if err := ioutil.WriteFile(filepath, src, 0644); err != nil {
return errors.Wrap(err, "writing output")
}
return nil
}
func lcFirst(s string) string {
r := []rune(s)
r[0] = unicode.ToLower(r[0])
return string(r)
}

View File

@@ -0,0 +1,245 @@
package generator
import "text/template"
var tpl = template.Must(template.New("generated").
Funcs(template.FuncMap{
"lcFirst": lcFirst,
}).
Parse(`
// Code generated by github.com/vektah/dataloaden, DO NOT EDIT.
package {{.Package}}
import (
"sync"
"time"
{{if .KeyType.ImportPath}}"{{.KeyType.ImportPath}}"{{end}}
{{if .ValType.ImportPath}}"{{.ValType.ImportPath}}"{{end}}
)
// {{.Name}}Config captures the config to create a new {{.Name}}
type {{.Name}}Config struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []{{.KeyType.String}}) ([]{{.ValType.String}}, []error)
// Wait is how long wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = not limit
MaxBatch int
}
// New{{.Name}} creates a new {{.Name}} given a fetch, wait, and maxBatch
func New{{.Name}}(config {{.Name}}Config) *{{.Name}} {
return &{{.Name}}{
fetch: config.Fetch,
wait: config.Wait,
maxBatch: config.MaxBatch,
}
}
// {{.Name}} batches and caches requests
type {{.Name}} struct {
// this method provides the data for the loader
fetch func(keys []{{.KeyType.String}}) ([]{{.ValType.String}}, []error)
// how long to done before sending a batch
wait time.Duration
// this will limit the maximum number of keys to send in one batch, 0 = no limit
maxBatch int
// INTERNAL
// lazily created cache
cache map[{{.KeyType.String}}]{{.ValType.String}}
// the current batch. keys will continue to be collected until timeout is hit,
// then everything will be sent to the fetch method and out to the listeners
batch *{{.Name|lcFirst}}Batch
// mutex to prevent races
mu sync.Mutex
}
type {{.Name|lcFirst}}Batch struct {
keys []{{.KeyType}}
data []{{.ValType.String}}
error []error
closing bool
done chan struct{}
}
// Load a {{.ValType.Name}} by key, batching and caching will be applied automatically
func (l *{{.Name}}) Load(key {{.KeyType.String}}) ({{.ValType.String}}, error) {
return l.LoadThunk(key)()
}
// LoadThunk returns a function that when called will block waiting for a {{.ValType.Name}}.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *{{.Name}}) LoadThunk(key {{.KeyType.String}}) func() ({{.ValType.String}}, error) {
l.mu.Lock()
if it, ok := l.cache[key]; ok {
l.mu.Unlock()
return func() ({{.ValType.String}}, error) {
return it, nil
}
}
if l.batch == nil {
l.batch = &{{.Name|lcFirst}}Batch{done: make(chan struct{})}
}
batch := l.batch
pos := batch.keyIndex(l, key)
l.mu.Unlock()
return func() ({{.ValType.String}}, error) {
<-batch.done
var data {{.ValType.String}}
if pos < len(batch.data) {
data = batch.data[pos]
}
var err error
// its convenient to be able to return a single error for everything
if len(batch.error) == 1 {
err = batch.error[0]
} else if batch.error != nil {
err = batch.error[pos]
}
if err == nil {
l.mu.Lock()
l.unsafeSet(key, data)
l.mu.Unlock()
}
return data, err
}
}
// LoadAll fetches many keys at once. It will be broken into appropriate sized
// sub batches depending on how the loader is configured
func (l *{{.Name}}) LoadAll(keys []{{.KeyType}}) ([]{{.ValType.String}}, []error) {
results := make([]func() ({{.ValType.String}}, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
{{.ValType.Name|lcFirst}}s := make([]{{.ValType.String}}, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
{{.ValType.Name|lcFirst}}s[i], errors[i] = thunk()
}
return {{.ValType.Name|lcFirst}}s, errors
}
// LoadAllThunk returns a function that when called will block waiting for a {{.ValType.Name}}s.
// This method should be used if you want one goroutine to make requests to many
// different data loaders without blocking until the thunk is called.
func (l *{{.Name}}) LoadAllThunk(keys []{{.KeyType}}) (func() ([]{{.ValType.String}}, []error)) {
results := make([]func() ({{.ValType.String}}, error), len(keys))
for i, key := range keys {
results[i] = l.LoadThunk(key)
}
return func() ([]{{.ValType.String}}, []error) {
{{.ValType.Name|lcFirst}}s := make([]{{.ValType.String}}, len(keys))
errors := make([]error, len(keys))
for i, thunk := range results {
{{.ValType.Name|lcFirst}}s[i], errors[i] = thunk()
}
return {{.ValType.Name|lcFirst}}s, errors
}
}
// Prime the cache with the provided key and value. If the key already exists, no change is made
// and false is returned.
// (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
func (l *{{.Name}}) Prime(key {{.KeyType}}, value {{.ValType.String}}) bool {
l.mu.Lock()
var found bool
if _, found = l.cache[key]; !found {
{{- if .ValType.IsPtr }}
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := *value
l.unsafeSet(key, &cpy)
{{- else if .ValType.IsSlice }}
// make a copy when writing to the cache, its easy to pass a pointer in from a loop var
// and end up with the whole cache pointing to the same value.
cpy := make({{.ValType.String}}, len(value))
copy(cpy, value)
l.unsafeSet(key, cpy)
{{- else }}
l.unsafeSet(key, value)
{{- end }}
}
l.mu.Unlock()
return !found
}
// Clear the value at key from the cache, if it exists
func (l *{{.Name}}) Clear(key {{.KeyType}}) {
l.mu.Lock()
delete(l.cache, key)
l.mu.Unlock()
}
func (l *{{.Name}}) unsafeSet(key {{.KeyType}}, value {{.ValType.String}}) {
if l.cache == nil {
l.cache = map[{{.KeyType}}]{{.ValType.String}}{}
}
l.cache[key] = value
}
// keyIndex will return the location of the key in the batch, if its not found
// it will add the key to the batch
func (b *{{.Name|lcFirst}}Batch) keyIndex(l *{{.Name}}, key {{.KeyType}}) int {
for i, existingKey := range b.keys {
if key == existingKey {
return i
}
}
pos := len(b.keys)
b.keys = append(b.keys, key)
if pos == 0 {
go b.startTimer(l)
}
if l.maxBatch != 0 && pos >= l.maxBatch-1 {
if !b.closing {
b.closing = true
l.batch = nil
go b.end(l)
}
}
return pos
}
func (b *{{.Name|lcFirst}}Batch) startTimer(l *{{.Name}}) {
time.Sleep(l.wait)
l.mu.Lock()
// we must have hit a batch limit and are already finalizing this batch
if b.closing {
l.mu.Unlock()
return
}
l.batch = nil
l.mu.Unlock()
b.end(l)
}
func (b *{{.Name|lcFirst}}Batch) end(l *{{.Name}}) {
b.data, b.error = l.fetch(b.keys)
close(b.done)
}
`))